Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/04/06 00:39:55 UTC

svn commit: r1089274 [1/2] - in /hadoop/hdfs/branches/HDFS-1052: ./ src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/java/org/apache/hadoop/hdfs/tool...

Author: suresh
Date: Tue Apr  5 22:39:54 2011
New Revision: 1089274

URL: http://svn.apache.org/viewvc?rev=1089274&view=rev
Log:
Merging changes from r1083902:r1085509 from trunk to federation

Modified:
    hadoop/hdfs/branches/HDFS-1052/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/build.xml   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsElement.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
    hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/   (props changed)
    hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary/   (props changed)

Propchange: hadoop/hdfs/branches/HDFS-1052/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  5 22:39:54 2011
@@ -1,4 +1,4 @@
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-265:796829-820463
 /hadoop/hdfs/branches/branch-0.21:820487
-/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036738,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902
+/hadoop/hdfs/trunk:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036738,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902,1083951,1083958,1085460,1085509

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Tue Apr  5 22:39:54 2011
@@ -251,6 +251,10 @@ Trunk (unreleased changes)
     HDFS-1791. Federation: Add command to delete block pool directories 
     from a datanode. (jitendra)
 
+    HDFS-1785. In BlockReceiver and DataXceiver, clientName.length() is used
+    multiple times for determining whether the source is a client or a
+    datanode.  (szetszwo)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)
@@ -307,8 +311,8 @@ Trunk (unreleased changes)
     HDFS-1763. Replace hard-coded option strings with variables from
     DFSConfigKeys. (eli)
 
-    HDFS-1521. Persist transaction ID on disk between NN restarts. (Ivan Kelly
-    and Todd Lipcon via todd)
+    HDFS-1541. Not marking datanodes dead when namenode in safemode.
+    (hairong)
 
   OPTIMIZATIONS
 
@@ -1812,12 +1816,8 @@ Release 0.21.0 - 2010-08-13
     HDFS-1598.  Directory listing on hftp:// does not show .*.crc files.
     (szetszwo)
 
-    HDFS-1413. Fix broken links to HDFS Wiki. (shv)
-
-    HDFS-1420. Clover build doesn't generate per-test coverage (cos)
-
-    HDFS-1598.  Directory listing on hftp:// does not show .*.crc files.
-    (szetszwo)
+    HDFS-1750. ListPathsServlet should not use HdfsFileStatus.getLocalName()
+    to get file name since it may return an empty string.  (szetszwo)
 
 Release 0.20.3 - 2011-1-5
 

Propchange: hadoop/hdfs/branches/HDFS-1052/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  5 22:39:54 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/build.xml:779102
 /hadoop/hdfs/branches/HDFS-265/build.xml:796829-820463
 /hadoop/hdfs/branches/branch-0.21/build.xml:820487
-/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902
+/hadoop/hdfs/trunk/build.xml:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902,1083951,1083958,1085460,1085509

Propchange: hadoop/hdfs/branches/HDFS-1052/src/c++/libhdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  5 22:39:54 2011
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663
-/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902
+/hadoop/hdfs/trunk/src/c++/libhdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902,1083951,1083958,1085460,1085509

Propchange: hadoop/hdfs/branches/HDFS-1052/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  5 22:39:54 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy:820487
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902,1083951,1083958,1085460,1085509

Propchange: hadoop/hdfs/branches/HDFS-1052/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  5 22:39:54 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-265/src/java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java:820487
-/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902
+/hadoop/hdfs/trunk/src/java:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902,1083951,1083958,1085460,1085509

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Tue Apr  5 22:39:54 2011
@@ -35,14 +35,14 @@ import java.util.zip.Checksum;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSOutputSummer;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -60,7 +60,6 @@ class BlockReceiver implements java.io.C
   public static final Log LOG = DataNode.LOG;
   static final Log ClientTraceLog = DataNode.ClientTraceLog;
   
-  private ExtendedBlock block; // the block to receive
   private DataInputStream in = null; // from where data are read
   private DataChecksum checksum; // from where chunks of a block can be read
   private OutputStream out = null; // to block file at local disk
@@ -78,28 +77,41 @@ class BlockReceiver implements java.io.C
   private Daemon responder = null;
   private DataTransferThrottler throttler;
   private FSDataset.BlockWriteStreams streams;
-  private String clientName;
-  DatanodeInfo srcDataNode = null;
+  private DatanodeInfo srcDataNode = null;
   private Checksum partialCrc = null;
   private final DataNode datanode;
-  private final BlockConstructionStage initialStage;
-  final private ReplicaInPipelineInterface replicaInfo;
   volatile private boolean mirrorError;
 
-  BlockReceiver(ExtendedBlock inBlock, DataInputStream in, String inAddr,
-                String myAddr, BlockConstructionStage stage, 
-                long newGs, long minBytesRcvd, long maxBytesRcvd, 
-                String clientName, DatanodeInfo srcDataNode, DataNode datanode)
-                throws IOException {
+  /** The client name.  It is empty if a datanode is the client */
+  private final String clientname;
+  private final boolean isClient; 
+  private final boolean isDatanode; 
+
+  /** the block to receive */
+  private final ExtendedBlock block; 
+  /** the replica to write */
+  private final ReplicaInPipelineInterface replicaInfo;
+  /** pipeline stage */
+  private final BlockConstructionStage initialStage;
+
+  BlockReceiver(final ExtendedBlock block, final DataInputStream in,
+      final String inAddr, final String myAddr,
+      final BlockConstructionStage stage, 
+      final long newGs, final long minBytesRcvd, final long maxBytesRcvd, 
+      final String clientname, final DatanodeInfo srcDataNode,
+      final DataNode datanode) throws IOException {
     try{
-      this.block = inBlock;
+      this.block = block;
       this.in = in;
       this.inAddr = inAddr;
       this.myAddr = myAddr;
-      this.clientName = clientName;
       this.srcDataNode = srcDataNode;
       this.datanode = datanode;
-      
+
+      this.clientname = clientname;
+      this.isDatanode = clientname.length() == 0;
+      this.isClient = !this.isDatanode;
+
       //for datanode, we have
       //1: clientName.length() == 0, and
       //2: stage == null, PIPELINE_SETUP_CREATE or TRANSFER_RBW
@@ -107,7 +119,7 @@ class BlockReceiver implements java.io.C
       //
       // Open local disk out
       //
-      if (clientName.length() == 0) { //replication or move
+      if (isDatanode) { //replication or move
         replicaInfo = datanode.data.createTemporary(block);
       } else {
         switch (stage) {
@@ -144,8 +156,8 @@ class BlockReceiver implements java.io.C
       this.bytesPerChecksum = checksum.getBytesPerChecksum();
       this.checksumSize = checksum.getChecksumSize();
       
-      boolean isCreate = stage == BlockConstructionStage.PIPELINE_SETUP_CREATE 
-      || clientName.length() == 0;
+      final boolean isCreate = isDatanode 
+          || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
       streams = replicaInfo.createStreams(isCreate,
           this.bytesPerChecksum, this.checksumSize);
       if (streams != null) {
@@ -532,7 +544,7 @@ class BlockReceiver implements java.io.C
        * protocol includes acks and only the last datanode needs to verify 
        * checksum.
        */
-      if (mirrorOut == null || clientName.length() == 0) {
+      if (mirrorOut == null || isDatanode) {
         verifyChunks(pktBuf, dataOff, len, pktBuf, checksumOff);
       }
 
@@ -631,7 +643,7 @@ class BlockReceiver implements java.io.C
       throttler = throttlerArg;
 
     try {
-      if (clientName.length() > 0) {
+      if (isClient) {
         responder = new Daemon(datanode.threadGroup, 
             new PacketResponder(this, block, mirrIn, replyOut, 
                                 numTargets, Thread.currentThread()));
@@ -654,7 +666,7 @@ class BlockReceiver implements java.io.C
       // if this write is for a replication request (and not
       // from a client), then finalize block. For client-writes, 
       // the block is finalized in the PacketResponder.
-      if (clientName.length() == 0) {
+      if (isDatanode) {
         // close the block/crc files
         close();
 
@@ -693,7 +705,7 @@ class BlockReceiver implements java.io.C
    * if this write is for a replication request (and not from a client)
    */
   private void cleanupBlock() throws IOException {
-    if (clientName.length() == 0
+    if (isDatanode
         && initialStage != BlockConstructionStage.TRANSFER_RBW) {
       datanode.data.unfinalizeBlock(block);
     }
@@ -930,14 +942,13 @@ class BlockReceiver implements java.io.C
               block.setNumBytes(replicaInfo.getNumBytes());
               datanode.data.finalizeBlock(block);
               datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
-              if (ClientTraceLog.isInfoEnabled() &&
-                  receiver.clientName.length() > 0) {
+              if (ClientTraceLog.isInfoEnabled() && isClient) {
                 long offset = 0;
                 DatanodeRegistration dnR = 
                   datanode.getDNRegistrationForBP(block.getBlockPoolId());
                 ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT,
                       receiver.inAddr, receiver.myAddr, block.getNumBytes(),
-                      "HDFS_WRITE", receiver.clientName, offset,
+                      "HDFS_WRITE", receiver.clientname, offset,
                       dnR.getStorageID(), block, endTime-startTime));
               } else {
                 LOG.info("Received block " + block + 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Tue Apr  5 22:39:54 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.da
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR;
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.ERROR_ACCESS_TOKEN;
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.SUCCESS;
+import static org.apache.hadoop.hdfs.server.common.Util.now;
 import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
 
 import java.io.BufferedInputStream;
@@ -34,14 +35,13 @@ import java.net.SocketException;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
@@ -50,8 +50,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
 import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 
@@ -226,12 +226,15 @@ class DataXceiver extends DataTransferPr
    * Write a block to disk.
    */
   @Override
-  protected void opWriteBlock(DataInputStream in, ExtendedBlock block, 
-      int pipelineSize, BlockConstructionStage stage,
-      long newGs, long minBytesRcvd, long maxBytesRcvd,
-      String client, DatanodeInfo srcDataNode, DatanodeInfo[] targets,
-      Token<BlockTokenIdentifier> blockToken) throws IOException {
-    updateCurrentThreadName("Receiving block " + block + " client=" + client);
+  protected void opWriteBlock(final DataInputStream in, final ExtendedBlock block, 
+      final int pipelineSize, final BlockConstructionStage stage,
+      final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
+      final String clientname, final DatanodeInfo srcDataNode,
+      final DatanodeInfo[] targets, final Token<BlockTokenIdentifier> blockToken
+      ) throws IOException {
+    updateCurrentThreadName("Receiving block " + block + " client=" + clientname);
+    final boolean isDatanode = clientname.length() == 0;
+    final boolean isClient = !isDatanode;
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() +
@@ -259,7 +262,7 @@ class DataXceiver extends DataTransferPr
             BlockTokenSecretManager.AccessMode.WRITE);
       } catch (InvalidToken e) {
         try {
-          if (client.length() != 0) {
+          if (isClient) {
             ERROR_ACCESS_TOKEN.write(replyOut);
             Text.writeString(replyOut, dnR.getName());
             replyOut.flush();
@@ -282,14 +285,14 @@ class DataXceiver extends DataTransferPr
     String firstBadLink = "";           // first datanode that failed in connection setup
     DataTransferProtocol.Status mirrorInStatus = SUCCESS;
     try {
-      if (client.length() == 0 || 
+      if (isDatanode || 
           stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         // open a block receiver
         blockReceiver = new BlockReceiver(block, in, 
             s.getRemoteSocketAddress().toString(),
             s.getLocalSocketAddress().toString(),
             stage, newGs, minBytesRcvd, maxBytesRcvd,
-            client, srcDataNode, datanode);
+            clientname, srcDataNode, datanode);
       } else {
         datanode.data.recoverClose(block, newGs, minBytesRcvd);
       }
@@ -320,7 +323,7 @@ class DataXceiver extends DataTransferPr
 
           // Write header: Copied from DFSClient.java!
           DataTransferProtocol.Sender.opWriteBlock(mirrorOut, originalBlock,
-              pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, client,
+              pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, clientname,
               srcDataNode, targets, blockToken);
 
           if (blockReceiver != null) { // send checksum header
@@ -329,7 +332,7 @@ class DataXceiver extends DataTransferPr
           mirrorOut.flush();
 
           // read connect ack (only for clients, not for replication req)
-          if (client.length() != 0) {
+          if (isClient) {
             mirrorInStatus = DataTransferProtocol.Status.read(mirrorIn);
             firstBadLink = Text.readString(mirrorIn);
             if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
@@ -341,7 +344,7 @@ class DataXceiver extends DataTransferPr
           }
 
         } catch (IOException e) {
-          if (client.length() != 0) {
+          if (isClient) {
             ERROR.write(replyOut);
             Text.writeString(replyOut, mirrorNode);
             replyOut.flush();
@@ -352,7 +355,7 @@ class DataXceiver extends DataTransferPr
           mirrorIn = null;
           IOUtils.closeSocket(mirrorSock);
           mirrorSock = null;
-          if (client.length() > 0) {
+          if (isClient) {
             throw e;
           } else {
             LOG.info(dnR + ":Exception transfering block " +
@@ -364,7 +367,7 @@ class DataXceiver extends DataTransferPr
       }
 
       // send connect ack back to source (only for clients)
-      if (client.length() != 0) {
+      if (isClient) {
         if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
           LOG.info("Datanode " + targets.length +
                    " forwarding connect ack to upstream firstbadlink is " +
@@ -383,7 +386,7 @@ class DataXceiver extends DataTransferPr
       }
 
       // update its generation stamp
-      if (client.length() != 0 && 
+      if (isClient && 
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         block.setGenerationStamp(newGs);
         block.setNumBytes(minBytesRcvd);
@@ -392,7 +395,7 @@ class DataXceiver extends DataTransferPr
       // if this write is for a replication request or recovering
       // a failed close for client, then confirm block. For other client-writes,
       // the block is finalized in the PacketResponder.
-      if ((client.length() == 0 && stage != BlockConstructionStage.TRANSFER_RBW)
+      if ((isDatanode  && stage != BlockConstructionStage.TRANSFER_RBW)
           ||
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);

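The BlockReceiver and DataXceiver changes above are HDFS-1785 from the CHANGES.txt section: rather than re-testing clientName.length() at every branch point, the client-vs-datanode distinction is computed once (an empty client name means the request came from another datanode, i.e. a replication or move) and kept in boolean flags. A minimal standalone sketch of the idiom, using a hypothetical Receiver class rather than the real HDFS classes:

    class Receiver {
      /** The client name.  It is empty if a datanode is the client. */
      private final String clientname;
      private final boolean isDatanode;
      private final boolean isClient;

      Receiver(final String clientname) {
        this.clientname = clientname;
        this.isDatanode = clientname.length() == 0;
        this.isClient = !this.isDatanode;
      }

      boolean needsResponder() {
        return isClient; // only client writes get a PacketResponder
      }

      void finishWrite() {
        if (isDatanode) {
          // replication or move: finalize the block right here
        } else {
          // client write: the block is finalized in the PacketResponder
        }
      }
    }

Computing the flags once in the constructor also makes the empty-string convention self-documenting at every later use, which is the point of the HDFS-1785 entry.
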
Propchange: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  5 22:39:54 2011
@@ -4,4 +4,4 @@
 /hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:820487
-/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902,1083951,1083958,1085460,1085509

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java Tue Apr  5 22:39:54 2011
@@ -55,7 +55,6 @@ public class BackupImage extends FSImage
 
   /** Is journal spooling in progress */
   volatile JSpoolState jsState;
-  private long lastAppliedTxId = 0;
 
   static enum JSpoolState {
     OFF,
@@ -154,11 +153,6 @@ public class BackupImage extends FSImage
     if(!editLog.isOpen())
       editLog.open();
 
-    // set storage fields
-    storage.setStorageInfo(sig);
-    storage.setImageDigest(sig.getImageDigest());
-    storage.setCheckpointTime(sig.checkpointTime);
-
     FSDirectory fsDir = getFSNamesystem().dir;
     if(fsDir.isEmpty()) {
       Iterator<StorageDirectory> itImage
@@ -169,7 +163,6 @@ public class BackupImage extends FSImage
         throw new IOException("Could not locate checkpoint directories");
       StorageDirectory sdName = itImage.next();
       StorageDirectory sdEdits = itEdits.next();
-
       getFSDirectoryRootLock().writeLock();
       try { // load image under rootDir lock
         loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
@@ -177,8 +170,12 @@ public class BackupImage extends FSImage
         getFSDirectoryRootLock().writeUnlock();
       }
       loadFSEdits(sdEdits);
-      lastAppliedTxId = getEditLog().getLastWrittenTxId();
     }
+
+    // set storage fields
+    storage.setStorageInfo(sig);
+    storage.setImageDigest(sig.imageDigest);
+    storage.setCheckpointTime(sig.checkpointTime);
   }
 
   /**
@@ -230,8 +227,7 @@ public class BackupImage extends FSImage
           backupInputStream.setBytes(data);
           FSEditLogLoader logLoader = new FSEditLogLoader(namesystem);
           logLoader.loadEditRecords(storage.getLayoutVersion(),
-              backupInputStream.getDataInputStream(), true,
-              lastAppliedTxId + 1);
+                    backupInputStream.getDataInputStream(), true);
           getFSNamesystem().dir.updateCountForINodeWithQuota(); // inefficient!
           break;
         case INPROGRESS:
@@ -352,18 +348,12 @@ public class BackupImage extends FSImage
       EditLogFileInputStream edits = new EditLogFileInputStream(jSpoolFile);
       DataInputStream in = edits.getDataInputStream();
       FSEditLogLoader logLoader = new FSEditLogLoader(namesystem);
-      int loaded = logLoader.loadFSEdits(in, false, lastAppliedTxId + 1);
-
-      lastAppliedTxId += loaded;
-      numEdits += loaded;
+      numEdits += logLoader.loadFSEdits(in, false);
 
       // first time reached the end of spool
       jsState = JSpoolState.WAIT;
-      loaded = logLoader.loadEditRecords(storage.getLayoutVersion(),
-                                         in, true, lastAppliedTxId + 1);
-      numEdits += loaded;
-      lastAppliedTxId += loaded;
-
+      numEdits += logLoader.loadEditRecords(storage.getLayoutVersion(),
+                                            in, true);
       getFSNamesystem().dir.updateCountForINodeWithQuota();
       edits.close();
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Tue Apr  5 22:39:54 2011
@@ -51,18 +51,15 @@ class EditLogBackupOutputStream extends 
 
   static class JournalRecord {
     byte op;
-    long txid;
     Writable[] args;
 
-    JournalRecord(byte op, long txid, Writable ... writables) {
+    JournalRecord(byte op, Writable ... writables) {
       this.op = op;
-      this.txid = txid;
       this.args = writables;
     }
 
     void write(DataOutputStream out) throws IOException {
       out.write(op);
-      out.writeLong(txid);
       if(args == null)
         return;
       for(Writable w : args)
@@ -108,8 +105,8 @@ class EditLogBackupOutputStream extends 
   }
 
   @Override // EditLogOutputStream
-  void write(byte op, long txid, Writable ... writables) throws IOException {
-    bufCurrent.add(new JournalRecord(op, txid, writables));
+  void write(byte op, Writable ... writables) throws IOException {
+    bufCurrent.add(new JournalRecord(op, writables));
   }
 
   /**

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java Tue Apr  5 22:39:54 2011
@@ -81,18 +81,10 @@ class EditLogFileOutputStream extends Ed
     bufCurrent.write(b);
   }
 
-  /**
-   * Write a transaction to the stream. The serialization format is:
-   * <ul>
-   *   <li>the opcode (byte)</li>
-   *   <li>the transaction id (long)</li>
-   *   <li>the actual Writables for the transaction</li>
-   * </ul>
-   * */
+  /** {@inheritDoc} */
   @Override
-  void write(byte op, long txid, Writable... writables) throws IOException {
+  void write(byte op, Writable... writables) throws IOException {
     write(op);
-    bufCurrent.writeLong(txid);
     for (Writable w : writables) {
       w.write(bufCurrent);
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java Tue Apr  5 22:39:54 2011
@@ -46,12 +46,10 @@ implements JournalStream {
    * an array of Writable arguments.
    * 
    * @param op operation
-   * @param txid the transaction ID of this operation
    * @param writables array of Writable arguments
    * @throws IOException
    */
-  abstract void write(byte op, long txid, Writable ... writables)
-  throws IOException;
+  abstract void write(byte op, Writable ... writables) throws IOException;
 
   /**
    * Create and initialize underlying persistent edits log storage.

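The BackupImage and edit-log stream diffs above roll back the per-record transaction id whose HDFS-1521 entry is removed from CHANGES.txt earlier in this commit: write(byte op, long txid, Writable...) becomes write(byte op, Writable...) in EditLogOutputStream and both of its implementations. Per the javadoc deleted from EditLogFileOutputStream, a record used to be serialized as the opcode (byte), the transaction id (long), then the op-specific Writables; after this merge the long is gone. A simplified sketch of the file-stream writer as merged:

    /** Write one record: the opcode byte followed by the operation's
     *  Writable arguments; no per-record transaction id on disk. */
    void write(byte op, Writable... writables) throws IOException {
      bufCurrent.write(op);      // the opcode
      for (Writable w : writables) {
        w.write(bufCurrent);     // the op-specific arguments
      }
    }
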
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Tue Apr  5 22:39:54 2011
@@ -306,19 +306,12 @@ public class FSEditLog implements NNStor
       if(getNumEditStreams() == 0)
         throw new java.lang.IllegalStateException(NO_JOURNAL_STREAMS_WARNING);
       ArrayList<EditLogOutputStream> errorStreams = null;
-
-      // Only start a new transaction for OPs which will be persisted to disk.
-      // Obviously this excludes control op codes.
       long start = now();
-      if (opCode.getOpCode() < FSEditLogOpCodes.OP_JSPOOL_START.getOpCode()) {
-        start = beginTransaction();
-      }
-
       for(EditLogOutputStream eStream : editStreams) {
         if(!eStream.isOperationSupported(opCode.getOpCode()))
           continue;
         try {
-          eStream.write(opCode.getOpCode(), txid, writables);
+          eStream.write(opCode.getOpCode(), writables);
         } catch (IOException ie) {
           LOG.error("logEdit: removing "+ eStream.getName(), ie);
           if(errorStreams == null)
@@ -327,7 +320,7 @@ public class FSEditLog implements NNStor
         }
       }
       disableAndReportErrorOnStreams(errorStreams);
-      endTransaction(start);
+      recordTransaction(start);
       
       // check if it is time to schedule an automatic sync
       if (!shouldForceSync()) {
@@ -378,8 +371,7 @@ public class FSEditLog implements NNStor
     return false;
   }
   
-  private long beginTransaction() {
-    assert Thread.holdsLock(this);
+  private void recordTransaction(long start) {
     // get a new transactionId
     txid++;
 
@@ -388,12 +380,7 @@ public class FSEditLog implements NNStor
     //
     TransactionId id = myTransactionId.get();
     id.txid = txid;
-    return now();
-  }
-  
-  private void endTransaction(long start) {
-    assert Thread.holdsLock(this);
-    
+
     // update statistics
     long end = now();
     numTransactions++;
@@ -403,21 +390,6 @@ public class FSEditLog implements NNStor
   }
 
   /**
-   * Return the transaction ID of the last transaction written to the log.
-   */
-  synchronized long getLastWrittenTxId() {
-    return txid;
-  }
-  
-  /**
-   * Set the transaction ID to use for the next transaction written.
-   */
-  synchronized void setNextTxId(long nextTxid) {
-    assert synctxid <= txid;
-    txid = nextTxid - 1;
-  }
-  
-  /**
    * Blocks until all ongoing edits have been synced to disk.
    * This differs from logSync in that it waits for edits that have been
    * written by other threads, not just edits from the calling thread.
@@ -812,8 +784,6 @@ public class FSEditLog implements NNStor
   
   /**
    * Closes the current edit log and opens edits.new. 
-   * @return the transaction id that will be used as the first transaction
-   *         in the new log
    */
   synchronized void rollEditLog() throws IOException {
     waitForSyncToFinish();
@@ -1044,7 +1014,7 @@ public class FSEditLog implements NNStor
     if(getNumEditStreams() == 0)
       throw new java.lang.IllegalStateException(NO_JOURNAL_STREAMS_WARNING);
     ArrayList<EditLogOutputStream> errorStreams = null;
-    long start = beginTransaction();
+    long start = now();
     for(EditLogOutputStream eStream : editStreams) {
       try {
         eStream.write(data, 0, length);
@@ -1056,7 +1026,7 @@ public class FSEditLog implements NNStor
       }
     }
     disableAndReportErrorOnStreams(errorStreams);
-    endTransaction(start);
+    recordTransaction(start);
   }
 
   /**

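In FSEditLog the same rollback merges beginTransaction() and endTransaction() into a single recordTransaction(long start), called only after the op has been written to every stream: the in-memory txid counter and the per-thread TransactionId are still maintained for sync bookkeeping, but no id is assigned before the write and none reaches disk. A condensed sketch of the resulting logEdit flow, with error-stream handling and sync scheduling omitted:

    long start = now();
    for (EditLogOutputStream eStream : editStreams) {
      if (!eStream.isOperationSupported(opCode.getOpCode()))
        continue;
      eStream.write(opCode.getOpCode(), writables); // no txid argument
    }
    recordTransaction(start); // txid++, per-thread id, timing statistics
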
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Tue Apr  5 22:39:54 2011
@@ -53,20 +53,17 @@ public class FSEditLogLoader {
    * This is where we apply edits that we've been writing to disk all
    * along.
    */
-  int loadFSEdits(EditLogInputStream edits, long expectedStartingTxId)
-  throws IOException {
+  int loadFSEdits(EditLogInputStream edits) throws IOException {
     DataInputStream in = edits.getDataInputStream();
     long startTime = now();
-    int numEdits = loadFSEdits(in, true, expectedStartingTxId);
+    int numEdits = loadFSEdits(in, true);
     FSImage.LOG.info("Edits file " + edits.getName() 
         + " of size " + edits.length() + " edits # " + numEdits 
         + " loaded in " + (now()-startTime)/1000 + " seconds.");
     return numEdits;
   }
 
-  int loadFSEdits(DataInputStream in, boolean closeOnExit,
-      long expectedStartingTxId)
-  throws IOException {
+  int loadFSEdits(DataInputStream in, boolean closeOnExit) throws IOException {
     int numEdits = 0;
     int logVersion = 0;
 
@@ -92,19 +89,19 @@ public class FSEditLogLoader {
       }
       assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION :
                             "Unsupported version " + logVersion;
-      
-      numEdits = loadEditRecords(logVersion, in, false, expectedStartingTxId);
+      numEdits = loadEditRecords(logVersion, in, false);
     } finally {
       if(closeOnExit)
         in.close();
     }
-    
+    if (logVersion != FSConstants.LAYOUT_VERSION) // other version
+      numEdits++; // save this image asap
     return numEdits;
   }
 
   @SuppressWarnings("deprecation")
   int loadEditRecords(int logVersion, DataInputStream in,
-      boolean closeOnExit, long expectedStartingTxId) throws IOException {
+      boolean closeOnExit) throws IOException {
     FSDirectory fsDir = fsNamesys.dir;
     int numEdits = 0;
     String clientName = null;
@@ -119,8 +116,6 @@ public class FSEditLogLoader {
         numOpUpdateMasterKey = 0, numOpOther = 0;
 
     try {
-      long txId = expectedStartingTxId - 1;
-
       while (true) {
         long timestamp = 0;
         long mtime = 0;
@@ -138,17 +133,6 @@ public class FSEditLogLoader {
         } catch (EOFException e) {
           break; // no more transactions
         }
-
-        if (logVersion <= -28) {
-          // Read the txid
-          long thisTxId = in.readLong();
-          if (thisTxId != txId + 1) {
-            throw new IOException("Expected transaction ID " +
-                (txId + 1) + " but got " + thisTxId);
-          }
-          txId = thisTxId;
-        }
-        
         numEdits++;
         switch (opCode) {
         case OP_ADD:

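FSEditLogLoader above is the read-side counterpart: the logVersion <= -28 branch that read a long after each opcode and verified it incremented by exactly one is removed, so a record is again just an opcode followed by its arguments, and the end of the log is still detected by the EOFException on the opcode read that survives in the diff. The restored check on logVersion likewise bumps numEdits whenever the log was written by a different layout version, so that the image is re-saved promptly ("save this image asap").
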
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue Apr  5 22:39:54 2011
@@ -167,7 +167,7 @@ public class FSImage implements NNStorag
       storage.setUpgradeManager(ns.upgradeManager);
     }
   }
- 
+
   void setCheckpointDirectories(Collection<URI> dirs,
                                 Collection<URI> editsDirs) {
     checkpointDirs = dirs;
@@ -661,22 +661,16 @@ public class FSImage implements NNStorag
     //
     // Load in bits
     //
-    latestEditsSD.read();
-    long editsVersion = storage.getLayoutVersion();
     latestNameSD.read();
-    long imageVersion = storage.getLayoutVersion();
-
-    loadFSImage(NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE));
+    needToSave |= loadFSImage(NNStorage.getStorageFile(latestNameSD,
+                                                       NameNodeFile.IMAGE));
     
     // Load latest edits
-    if (latestNameCheckpointTime > latestEditsCheckpointTime) {
+    if (latestNameCheckpointTime > latestEditsCheckpointTime)
       // the image is already current, discard edits
       needToSave |= true;
-    } else { // latestNameCheckpointTime == latestEditsCheckpointTime
-      needToSave |= loadFSEdits(latestEditsSD);
-    }
-    needToSave |= (editsVersion != FSConstants.LAYOUT_VERSION 
-                    || imageVersion != FSConstants.LAYOUT_VERSION);
+    else // latestNameCheckpointTime == latestEditsCheckpointTime
+      needToSave |= (loadFSEdits(latestEditsSD) > 0);
     
     return needToSave;
   }
@@ -686,11 +680,10 @@ public class FSImage implements NNStorag
    * filenames and blocks.  Return whether we should
    * "re-save" and consolidate the edit-logs
    */
-  void loadFSImage(File curFile) throws IOException {
+  boolean loadFSImage(File curFile) throws IOException {
     FSImageFormat.Loader loader = new FSImageFormat.Loader(
         conf, getFSNamesystem());
     loader.load(curFile);
-
     namesystem.setBlockPoolId(this.getBlockPoolID());
 
     // Check that the image digest we loaded matches up with what
@@ -703,42 +696,44 @@ public class FSImage implements NNStorag
           " is corrupt with MD5 checksum of " + readImageMd5 +
           " but expecting " + storage.getImageDigest());
     }
-    storage.setCheckpointTxId(loader.getLoadedImageTxId());
+
+    storage.namespaceID = loader.getLoadedNamespaceID();
+    storage.layoutVersion = loader.getLoadedImageVersion();
+
+    boolean needToSave =
+      loader.getLoadedImageVersion() != FSConstants.LAYOUT_VERSION;
+    return needToSave;
   }
 
   /**
    * Load and merge edits from two edits files
    * 
    * @param sd storage directory
-   * @return true if the image should be re-saved
+   * @return number of edits loaded
    * @throws IOException
    */
-  boolean loadFSEdits(StorageDirectory sd) throws IOException {
+  int loadFSEdits(StorageDirectory sd) throws IOException {
     FSEditLogLoader loader = new FSEditLogLoader(namesystem);
     
+    int numEdits = 0;
     EditLogFileInputStream edits =
       new EditLogFileInputStream(NNStorage.getStorageFile(sd,
                                                           NameNodeFile.EDITS));
-    long startingTxId = storage.getCheckpointTxId() + 1;
-    long numLoaded = loader.loadFSEdits(edits, startingTxId);
-    startingTxId += numLoaded;
-
+    
+    numEdits = loader.loadFSEdits(edits);
     edits.close();
     File editsNew = NNStorage.getStorageFile(sd, NameNodeFile.EDITS_NEW);
     
     if (editsNew.exists() && editsNew.length() > 0) {
       edits = new EditLogFileInputStream(editsNew);
-      numLoaded += loader.loadFSEdits(edits, startingTxId);
+      numEdits += loader.loadFSEdits(edits);
       edits.close();
     }
     
     // update the counts.
     getFSNamesystem().dir.updateCountForINodeWithQuota();    
     
-    // update the txid for the edit log
-    editLog.setNextTxId(storage.getCheckpointTxId() + numLoaded + 1);
-    
-    return numLoaded > 0;
+    return numEdits;
   }
 
   /**
@@ -749,7 +744,6 @@ public class FSImage implements NNStorag
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     saver.save(newFile, getFSNamesystem(), compression);
     storage.setImageDigest(saver.getSavedDigest());
-    storage.setCheckpointTxId(editLog.getLastWrittenTxId());
   }
 
   /**

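FSImage correspondingly goes back to deciding whether to re-save from version staleness and edit counts rather than from a persisted checkpoint transaction id. Condensed from the diff above, with imageFile standing in for the NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE) call and the surrounding bookkeeping omitted:

    boolean needToSave = loadFSImage(imageFile);  // true if the image's
                                                  // layout version is stale
    if (latestNameCheckpointTime > latestEditsCheckpointTime)
      needToSave |= true;           // image newer than edits: discard edits
    else
      needToSave |= (loadFSEdits(latestEditsSD) > 0); // edits were applied
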
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Tue Apr  5 22:39:54 2011
@@ -40,7 +40,6 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
 
@@ -68,8 +67,10 @@ class FSImageFormat {
     /** Set to true once a file has been loaded using this loader. */
     private boolean loaded = false;
 
-    /** The transaction ID of the last edit represented by the loaded file */
-    private long imgTxId;
+    /** The image version of the loaded file */
+    private int imgVersion;
+    /** The namespace ID of the loaded file */
+    private int imgNamespaceID;
     /** The MD5 sum of the loaded file */
     private MD5Hash imgDigest;
 
@@ -79,6 +80,15 @@ class FSImageFormat {
     }
 
     /**
+     * Return the version number of the image that has been loaded.
+     * @throws IllegalStateException if load() has not yet been called.
+     */
+    int getLoadedImageVersion() {
+      checkLoaded();
+      return imgVersion;
+    }
+    
+    /**
      * Return the MD5 checksum of the image that has been loaded.
      * @throws IllegalStateException if load() has not yet been called.
      */
@@ -87,9 +97,13 @@ class FSImageFormat {
       return imgDigest;
     }
 
-    long getLoadedImageTxId() {
+    /**
+     * Return the namespace ID of the image that has been loaded.
+     * @throws IllegalStateException if load() has not yet been called.
+     */
+    int getLoadedNamespaceID() {
       checkLoaded();
-      return imgTxId;
+      return imgNamespaceID;
     }
 
     /**
@@ -138,14 +152,10 @@ class FSImageFormat {
          * it should not contain version and namespace fields
          */
         // read image version: first appeared in version -1
-        long imgVersion = in.readInt();
-        if(getLayoutVersion() != imgVersion)
-          throw new InconsistentFSStateException(curFile, 
-              "imgVersion " + imgVersion +
-              " expected to be " + getLayoutVersion());
+        imgVersion = in.readInt();
 
         // read namespaceID: first appeared in version -2
-        in.readInt();
+        imgNamespaceID = in.readInt();
 
         // read number of files
         long numFiles = readNumFiles(in);
@@ -155,15 +165,6 @@ class FSImageFormat {
           long genstamp = in.readLong();
           namesystem.setGenerationStamp(genstamp); 
         }
-        
-        // read the transaction ID of the last edit represented by
-        // this image
-        if (imgVersion <= -28) {
-          imgTxId = in.readLong();
-        } else {
-          imgTxId = 0;
-        }
-        
 
         // read compression related info
         FSImageCompression compression;
@@ -255,12 +256,11 @@ class FSImageFormat {
    * @return an inode
    */
   private INode loadINode(DataInputStream in)
-      throws IOException {
+  throws IOException {
     long modificationTime = 0;
     long atime = 0;
     long blockSize = 0;
     
-    long imgVersion = getLayoutVersion();
     short replication = in.readShort();
     replication = namesystem.adjustReplication(replication);
     modificationTime = in.readLong();
@@ -326,10 +326,7 @@ class FSImageFormat {
           modificationTime, atime, nsQuota, dsQuota, blockSize);
     }
 
-    private void loadDatanodes(DataInputStream in)
-        throws IOException {
-      long imgVersion = getLayoutVersion();
-
+    private void loadDatanodes(DataInputStream in) throws IOException {
       if (imgVersion > -3) // pre datanode image version
         return;
       if (imgVersion <= -12) {
@@ -345,7 +342,6 @@ class FSImageFormat {
     private void loadFilesUnderConstruction(DataInputStream in)
     throws IOException {
       FSDirectory fsDir = namesystem.dir;
-      long imgVersion = getLayoutVersion();
       if (imgVersion > -13) // pre lease image version
         return;
       int size = in.readInt();
@@ -371,10 +367,7 @@ class FSImageFormat {
       }
     }
 
-    private void loadSecretManagerState(DataInputStream in)
-        throws IOException {
-      long imgVersion = getLayoutVersion();
-
+    private void loadSecretManagerState(DataInputStream in) throws IOException {
       if (imgVersion > -23) {
         //SecretManagerState is not available.
         //This must not happen if security is turned on.
@@ -383,14 +376,8 @@ class FSImageFormat {
       namesystem.loadSecretManagerState(in);
     }
 
-    private long getLayoutVersion() {
-      return namesystem.getFSImage().getStorage().getLayoutVersion();
-    }
-
-    private long readNumFiles(DataInputStream in)
-        throws IOException {
-      long imgVersion = getLayoutVersion();
 
+    private long readNumFiles(DataInputStream in) throws IOException {
       if (imgVersion <= -16) {
         return in.readLong();
       } else {
@@ -485,12 +472,9 @@ class FSImageFormat {
       DataOutputStream out = new DataOutputStream(fos);
       try {
         out.writeInt(FSConstants.LAYOUT_VERSION);
-        out.writeInt(sourceNamesystem.getFSImage()
-                     .getStorage().getNamespaceID()); // TODO bad dependency
+        out.writeInt(sourceNamesystem.getFSImage().getStorage().getNamespaceID()); // TODO bad dependency
         out.writeLong(fsDir.rootDir.numItemsInTree());
         out.writeLong(sourceNamesystem.getGenerationStamp());
-        long txid = sourceNamesystem.getEditLog().getLastWrittenTxId();
-        out.writeLong(txid);
 
         // write compression info and set up compressed stream
         out = compression.writeHeaderAndWrapStream(fos);

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Apr  5 22:39:54 2011
@@ -3074,6 +3074,10 @@ public class FSNamesystem implements FSC
    * effect causes more datanodes to be declared dead.
    */
   void heartbeatCheck() {
+    if (isInSafeMode()) {
+      // not to check dead nodes if in safemode
+      return;
+    }
     boolean allAlive = false;
     while (!allAlive) {
       boolean foundDead = false;

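The four-line FSNamesystem hunk above is HDFS-1541 from the CHANGES.txt section: heartbeatCheck() now returns immediately while the namenode is in safemode. The motivation, as the issue title suggests, is to avoid declaring datanodes dead during startup, when the namenode sits in safemode and heartbeat processing may lag; marking nodes dead at that point would be premature and could trigger needless replication work once safemode exits.
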
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Tue Apr  5 22:39:54 2011
@@ -63,10 +63,11 @@ public class ListPathsServlet extends Df
    * Node information includes path, modification, permission, owner and group.
    * For files, it also includes size, replication and block-size. 
    */
-  static void writeInfo(String parent, HdfsFileStatus i, XMLOutputter doc) throws IOException {
+  static void writeInfo(final Path fullpath, final HdfsFileStatus i,
+      final XMLOutputter doc) throws IOException {
     final SimpleDateFormat ldf = df.get();
     doc.startTag(i.isDir() ? "directory" : "file");
-    doc.attribute("path", i.getFullPath(new Path(parent)).toUri().getPath());
+    doc.attribute("path", fullpath.toUri().getPath());
     doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
     doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
     if (!i.isDir()) {
@@ -154,7 +155,7 @@ public class ListPathsServlet extends Df
 
           HdfsFileStatus base = nn.getFileInfo(path);
           if ((base != null) && base.isDir()) {
-            writeInfo(path, base, doc);
+            writeInfo(base.getFullPath(new Path(path)), base, doc);
           }
 
           Stack<String> pathstack = new Stack<String>();
@@ -177,7 +178,8 @@ public class ListPathsServlet extends Df
                 }
                 HdfsFileStatus[] listing = thisListing.getPartialListing();
                 for (HdfsFileStatus i : listing) {
-                  String localName = i.getLocalName();
+                  final Path fullpath = i.getFullPath(new Path(p));
+                  final String localName = fullpath.getName();
                   if (exclude.matcher(localName).matches()
                       || !filter.matcher(localName).matches()) {
                     continue;
@@ -185,7 +187,7 @@ public class ListPathsServlet extends Df
                   if (recur && i.isDir()) {
                     pathstack.push(new Path(p, localName).toUri().getPath());
                   }
-                  writeInfo(p, i, doc);
+                  writeInfo(fullpath, i, doc);
                 }
                 lastReturnedName = thisListing.getLastName();
               } while (thisListing.hasMore());

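The ListPathsServlet hunks resolve each child's full path once and reuse it for both filtering and XML output; HdfsFileStatus carries only the last path component, so it must be combined with the parent directory. A sketch of the listing loop after the change (names follow the diff; the servlet plumbing is elided):

    // fullpath resolves the status's local name against the parent p;
    // localName is its final component, checked against the
    // include/exclude patterns before writeInfo() emits the element.
    final Path parent = new Path(p);
    for (HdfsFileStatus i : listing) {
      final Path fullpath = i.getFullPath(parent);
      final String localName = fullpath.getName();
      if (exclude.matcher(localName).matches()
          || !filter.matcher(localName).matches()) {
        continue;
      }
      writeInfo(fullpath, i, doc);
    }
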
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Tue Apr  5 22:39:54 2011
@@ -67,7 +67,6 @@ public class NNStorage extends Storage i
   private static final Log LOG = LogFactory.getLog(NNStorage.class.getName());
 
   static final String MESSAGE_DIGEST_PROPERTY = "imageMD5Digest";
-  static final String CHECKPOINT_TXID_PROPERTY = "checkpointTxId";
 
   //
   // The filenames used for storing the images
@@ -153,13 +152,6 @@ public class NNStorage extends Storage i
   private long checkpointTime = -1L;  // The age of the image
 
   /**
-   * TxId of the last transaction that was included in the most
-   * recent fsimage file. This does not include any transactions
-   * that have since been written to the edit log.
-   */
-  protected long checkpointTxId;
-
-  /**
    * list of failed (and thus removed) storages
    */
   final protected List<StorageDirectory> removedStorageDirs
@@ -501,20 +493,6 @@ public class NNStorage extends Storage i
   }
 
   /**
-   * Set the transaction ID of the last checkpoint
-   */
-  void setCheckpointTxId(long checkpointTxId) {
-    this.checkpointTxId = checkpointTxId;
-  }
-
-  /**
-   * Return the transaction ID of the last checkpoint.
-   */
-  long getCheckpointTxId() {
-    return checkpointTxId;
-  }
-
-  /**
    * Set the current checkpoint time. Writes the new checkpoint
    * time to all available storage directories.
    * @param newCpT The new checkpoint time.
@@ -723,21 +701,6 @@ public class NNStorage extends Storage i
           " has image MD5 digest when version is " + layoutVersion);
     }
 
-    String sCheckpointId = props.getProperty(CHECKPOINT_TXID_PROPERTY);
-    if (layoutVersion <= -28) {
-      if (sCheckpointId == null) {
-        throw new InconsistentFSStateException(sd.getRoot(),
-            "file " + STORAGE_FILE_VERSION
-            + " does not have the checkpoint transaction id set.");
-      }
-      this.checkpointTxId = Long.valueOf(sCheckpointId);
-    } else if (sCheckpointId != null) {
-      throw new InconsistentFSStateException(sd.getRoot(),
-          "file " + STORAGE_FILE_VERSION +
-          " has checkpoint transaction id when version is " 
-          + layoutVersion);
-    }
-
     this.setCheckpointTime(readCheckpointTime(sd));
   }
 
@@ -773,7 +736,7 @@ public class NNStorage extends Storage i
     }
 
     props.setProperty(MESSAGE_DIGEST_PROPERTY, imageDigest.toString());
-    props.setProperty(CHECKPOINT_TXID_PROPERTY, String.valueOf(checkpointTxId));
+
     writeCheckpointTime(sd);
   }
 

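The removed checkpointTxId block followed the same version-gated property pattern that NNStorage keeps using for the image MD5 digest. A generic sketch of that pattern (SOME_PROPERTY and INTRODUCED_IN are hypothetical placeholders):

    // Newer layout versions are more negative, so a property introduced
    // at version INTRODUCED_IN is mandatory when layoutVersion is at or
    // below that value, and forbidden in older layouts.
    String sVal = props.getProperty(SOME_PROPERTY);
    if (layoutVersion <= INTRODUCED_IN) {
      if (sVal == null) {
        throw new InconsistentFSStateException(sd.getRoot(),
            "file " + STORAGE_FILE_VERSION + " does not have " + SOME_PROPERTY);
      }
      // parse and store sVal ...
    } else if (sVal != null) {
      throw new InconsistentFSStateException(sd.getRoot(),
          "file " + STORAGE_FILE_VERSION + " has " + SOME_PROPERTY
          + " when version is " + layoutVersion);
    }
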
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Apr  5 22:39:54 2011
@@ -711,10 +711,10 @@ public class SecondaryNameNode implement
         sdEdits = it.next();
       if (sdEdits == null)
         throw new IOException("Could not locate checkpoint edits");
-      
-      this.getStorage().setStorageInfo(sig);
-      this.getStorage().setImageDigest(sig.getImageDigest());
       if (loadImage) {
+        // reset layoutVersion to avoid the assert in loadFSImage()
+        this.getStorage().layoutVersion = -1;
+        getStorage();
         loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
       }
       loadFSEdits(sdEdits);

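The SecondaryNameNode hunk is terse; restated as a sketch, the apparent intent is to clear the cached layout version before re-reading the checkpoint image, so the assert in loadFSImage() that compares the cached version against the one in the file does not trip on a stale value (names follow the diff; the surrounding checkpoint logic is elided):

    if (loadImage) {
      // force loadFSImage() to accept whatever version the file carries
      getStorage().layoutVersion = -1;
      loadFSImage(NNStorage.getStorageFile(sdName, NameNodeFile.IMAGE));
    }
    loadFSEdits(sdEdits);
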
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsElement.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsElement.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsElement.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsElement.java Tue Apr  5 22:39:54 2011
@@ -31,7 +31,6 @@ public enum EditsElement {
   EDITS_VERSION,
   RECORD,
   OPCODE,
-  TRANSACTION_ID,
   DATA,
     // elements in the data part of the editLog records
     LENGTH,

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java Tue Apr  5 22:39:54 2011
@@ -49,7 +49,7 @@ import static org.apache.hadoop.hdfs.too
 class EditsLoaderCurrent implements EditsLoader {
 
   private static int [] supportedVersions = {
-    -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28 };
+    -18, -19, -20, -21, -22, -23, -24, -25, -26, -27 };
 
   private EditsVisitor v;
   private int editsVersion = 0;
@@ -77,9 +77,7 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_INVALID
    */
   private void visit_OP_INVALID() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
+    // nothing to do; this op code has no data
   }
 
   /**
@@ -103,9 +101,6 @@ class EditsLoaderCurrent implements Edit
    */
   private void visit_OP_ADD_or_OP_CLOSE(FSEditLogOpCodes editsOpCode)
     throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
 
     IntToken opAddLength = v.visitInt(EditsElement.LENGTH);
     // this happens if the edits is not properly ended (-1 op code),
@@ -149,10 +144,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_RENAME_OLD
    */
   private void visit_OP_RENAME_OLD() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitInt(        EditsElement.LENGTH);
     v.visitStringUTF8( EditsElement.SOURCE);
     v.visitStringUTF8( EditsElement.DESTINATION);
@@ -163,10 +154,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_DELETE
    */
   private void visit_OP_DELETE() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitInt(        EditsElement.LENGTH);
     v.visitStringUTF8( EditsElement.PATH);
     v.visitStringUTF8( EditsElement.TIMESTAMP);
@@ -176,10 +163,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_MKDIR
    */
   private void visit_OP_MKDIR() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitInt(        EditsElement.LENGTH);
     v.visitStringUTF8( EditsElement.PATH);
     v.visitStringUTF8( EditsElement.TIMESTAMP);
@@ -198,10 +181,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_SET_REPLICATION
    */
   private void visit_OP_SET_REPLICATION() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitStringUTF8(EditsElement.PATH);
     v.visitStringUTF8(EditsElement.REPLICATION);
   }
@@ -210,10 +189,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_SET_PERMISSIONS
    */
   private void visit_OP_SET_PERMISSIONS() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitStringUTF8( EditsElement.PATH);
     v.visitShort(      EditsElement.FS_PERMISSIONS);
   }
@@ -222,10 +197,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_SET_OWNER
    */
   private void visit_OP_SET_OWNER() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitStringUTF8(EditsElement.PATH);
     v.visitStringUTF8(EditsElement.USERNAME);
     v.visitStringUTF8(EditsElement.GROUPNAME);
@@ -235,10 +206,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_SET_GENSTAMP
    */
   private void visit_OP_SET_GENSTAMP() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitLong(EditsElement.GENERATION_STAMP);
   }
 
@@ -246,10 +213,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_TIMES
    */
   private void visit_OP_TIMES() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitInt(        EditsElement.LENGTH);
     v.visitStringUTF8( EditsElement.PATH);
     v.visitStringUTF8( EditsElement.MTIME);
@@ -260,10 +223,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_SET_QUOTA
    */
   private void visit_OP_SET_QUOTA() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitStringUTF8( EditsElement.PATH);
     v.visitLong(       EditsElement.NS_QUOTA);
     v.visitLong(       EditsElement.DS_QUOTA);
@@ -273,10 +232,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_RENAME
    */
   private void visit_OP_RENAME() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     if(editsVersion > -21) {
       throw new IOException("Unexpected op code " + FSEditLogOpCodes.OP_RENAME
         + " for edit log version " + editsVersion
@@ -293,10 +248,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_CONCAT_DELETE
    */
   private void visit_OP_CONCAT_DELETE() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     if(editsVersion > -22) {
       throw new IOException("Unexpected op code "
         + FSEditLogOpCodes.OP_CONCAT_DELETE
@@ -317,10 +268,6 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_SYMLINK
    */
   private void visit_OP_SYMLINK() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-
     v.visitInt(        EditsElement.LENGTH);
     v.visitStringUTF8( EditsElement.SOURCE);
     v.visitStringUTF8( EditsElement.DESTINATION);
@@ -340,25 +287,21 @@ class EditsLoaderCurrent implements Edit
    * Visit OP_GET_DELEGATION_TOKEN
    */
   private void visit_OP_GET_DELEGATION_TOKEN() throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-    
-    if(editsVersion > -24) {
-      throw new IOException("Unexpected op code "
+      if(editsVersion > -24) {
+        throw new IOException("Unexpected op code "
           + FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN
           + " for edit log version " + editsVersion
           + " (op code 18 only expected for 24 and later)");
-    }
-    v.visitByte(       EditsElement.T_VERSION);
-    v.visitStringText( EditsElement.T_OWNER);
-    v.visitStringText( EditsElement.T_RENEWER);
-    v.visitStringText( EditsElement.T_REAL_USER);
-    v.visitVLong(      EditsElement.T_ISSUE_DATE);
-    v.visitVLong(      EditsElement.T_MAX_DATE);
-    v.visitVInt(       EditsElement.T_SEQUENCE_NUMBER);
-    v.visitVInt(       EditsElement.T_MASTER_KEY_ID);
-    v.visitStringUTF8( EditsElement.T_EXPIRY_TIME);
+      }
+      v.visitByte(       EditsElement.T_VERSION);
+      v.visitStringText( EditsElement.T_OWNER);
+      v.visitStringText( EditsElement.T_RENEWER);
+      v.visitStringText( EditsElement.T_REAL_USER);
+      v.visitVLong(      EditsElement.T_ISSUE_DATE);
+      v.visitVLong(      EditsElement.T_MAX_DATE);
+      v.visitVInt(       EditsElement.T_SEQUENCE_NUMBER);
+      v.visitVInt(       EditsElement.T_MASTER_KEY_ID);
+      v.visitStringUTF8( EditsElement.T_EXPIRY_TIME);
   }
 
   /**
@@ -366,25 +309,22 @@ class EditsLoaderCurrent implements Edit
    */
   private void visit_OP_RENEW_DELEGATION_TOKEN()
     throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
 
-    if(editsVersion > -24) {
-      throw new IOException("Unexpected op code "
+      if(editsVersion > -24) {
+        throw new IOException("Unexpected op code "
           + FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN
           + " for edit log version " + editsVersion
           + " (op code 19 only expected for 24 and later)");
-    }
-    v.visitByte(       EditsElement.T_VERSION);
-    v.visitStringText( EditsElement.T_OWNER);
-    v.visitStringText( EditsElement.T_RENEWER);
-    v.visitStringText( EditsElement.T_REAL_USER);
-    v.visitVLong(      EditsElement.T_ISSUE_DATE);
-    v.visitVLong(      EditsElement.T_MAX_DATE);
-    v.visitVInt(       EditsElement.T_SEQUENCE_NUMBER);
-    v.visitVInt(       EditsElement.T_MASTER_KEY_ID);
-    v.visitStringUTF8( EditsElement.T_EXPIRY_TIME);
+      }
+      v.visitByte(       EditsElement.T_VERSION);
+      v.visitStringText( EditsElement.T_OWNER);
+      v.visitStringText( EditsElement.T_RENEWER);
+      v.visitStringText( EditsElement.T_REAL_USER);
+      v.visitVLong(      EditsElement.T_ISSUE_DATE);
+      v.visitVLong(      EditsElement.T_MAX_DATE);
+      v.visitVInt(       EditsElement.T_SEQUENCE_NUMBER);
+      v.visitVInt(       EditsElement.T_MASTER_KEY_ID);
+      v.visitStringUTF8( EditsElement.T_EXPIRY_TIME);
   }
 
   /**
@@ -392,24 +332,21 @@ class EditsLoaderCurrent implements Edit
    */
   private void visit_OP_CANCEL_DELEGATION_TOKEN()
     throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
 
-    if(editsVersion > -24) {
-      throw new IOException("Unexpected op code "
+      if(editsVersion > -24) {
+        throw new IOException("Unexpected op code "
           + FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN
           + " for edit log version " + editsVersion
           + " (op code 20 only expected for 24 and later)");
-    }
-    v.visitByte(       EditsElement.T_VERSION);
-    v.visitStringText( EditsElement.T_OWNER);
-    v.visitStringText( EditsElement.T_RENEWER);
-    v.visitStringText( EditsElement.T_REAL_USER);
-    v.visitVLong(      EditsElement.T_ISSUE_DATE);
-    v.visitVLong(      EditsElement.T_MAX_DATE);
-    v.visitVInt(       EditsElement.T_SEQUENCE_NUMBER);
-    v.visitVInt(       EditsElement.T_MASTER_KEY_ID);
+      }
+      v.visitByte(       EditsElement.T_VERSION);
+      v.visitStringText( EditsElement.T_OWNER);
+      v.visitStringText( EditsElement.T_RENEWER);
+      v.visitStringText( EditsElement.T_REAL_USER);
+      v.visitVLong(      EditsElement.T_ISSUE_DATE);
+      v.visitVLong(      EditsElement.T_MAX_DATE);
+      v.visitVInt(       EditsElement.T_SEQUENCE_NUMBER);
+      v.visitVInt(       EditsElement.T_MASTER_KEY_ID);
   }
 
   /**
@@ -417,20 +354,17 @@ class EditsLoaderCurrent implements Edit
    */
   private void visit_OP_UPDATE_MASTER_KEY()
     throws IOException {
-    if(editsVersion <= -28) {
-      v.visitLong(EditsElement.TRANSACTION_ID);
-    }
-    
-    if(editsVersion > -24) {
-      throw new IOException("Unexpected op code "
+
+      if(editsVersion > -24) {
+        throw new IOException("Unexpected op code "
           + FSEditLogOpCodes.OP_UPDATE_MASTER_KEY
           + " for edit log version " + editsVersion
           + "(op code 21 only expected for 24 and later)");
-    }
-    v.visitVInt(  EditsElement.KEY_ID);
-    v.visitVLong( EditsElement.KEY_EXPIRY_DATE);
-    VIntToken blobLengthToken = v.visitVInt(EditsElement.KEY_LENGTH);
-    v.visitBlob(EditsElement.KEY_BLOB, blobLengthToken.value);
+      }
+      v.visitVInt(  EditsElement.KEY_ID);
+      v.visitVLong( EditsElement.KEY_EXPIRY_DATE);
+      VIntToken blobLengthToken = v.visitVInt(EditsElement.KEY_LENGTH);
+      v.visitBlob(EditsElement.KEY_BLOB, blobLengthToken.value);
   }
 
   private void visitOpCode(FSEditLogOpCodes editsOpCode)

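Every visit_OP_* hunk above removes the same version -28 preamble, since this branch no longer prefixes edit-log records with a transaction id. What remains is the per-opcode version guard; a sketch of its shape, using the delegation-token opcode from the diff (this method lives inside EditsLoaderCurrent):

    // After the merge each visitor starts directly with its payload;
    // ops introduced at layout version -24 still reject older logs.
    private void visit_OP_GET_DELEGATION_TOKEN() throws IOException {
      if (editsVersion > -24) { // op code only valid for -24 and later
        throw new IOException("Unexpected op code "
            + FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN
            + " for edit log version " + editsVersion);
      }
      v.visitByte(       EditsElement.T_VERSION);
      v.visitStringText( EditsElement.T_OWNER);
      // ... remaining token fields as in the diff ...
    }
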
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Tue Apr  5 22:39:54 2011
@@ -121,7 +121,7 @@ class ImageLoaderCurrent implements Imag
   protected final DateFormat dateFormat = 
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int [] versions = 
-    {-16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28};
+    {-16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27};
   private int imageVersion = 0;
 
   /* (non-Javadoc)
@@ -156,10 +156,6 @@ class ImageLoaderCurrent implements Imag
 
       v.visit(ImageElement.GENERATION_STAMP, in.readLong());
 
-      if (imageVersion <= -28) {
-        v.visit(ImageElement.TRANSACTION_ID, in.readLong());
-      }
-
       if (imageVersion <= -25) {
         boolean isCompressed = in.readBoolean();
        v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));

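Dropping -28 from the versions array means this loader now refuses images whose header carries the transaction-id field. A hedged sketch of the membership test such an array typically backs (the real canLoadVersion may differ):

    private static final int[] VERSIONS =
        {-16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27};

    // Linear scan is fine for a dozen entries; returns true only for
    // layout versions this loader knows how to parse.
    static boolean canLoadVersion(int version) {
      for (int v : VERSIONS) {
        if (v == version) {
          return true;
        }
      }
      return false;
    }
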
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java Tue Apr  5 22:39:54 2011
@@ -71,8 +71,7 @@ abstract class ImageVisitor {
     NUM_DELEGATION_TOKENS,
     DELEGATION_TOKENS,
     DELEGATION_TOKEN_IDENTIFIER,
-    DELEGATION_TOKEN_EXPIRY_TIME,
-    TRANSACTION_ID
+    DELEGATION_TOKEN_EXPIRY_TIME
   }
   
   /**

Propchange: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Apr  5 22:39:54 2011
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902
+/hadoop/hdfs/trunk/src/test/hdfs:987665-1004788,1026178-1028906,1032470-1033639,1034073,1034082-1034181,1034501-1034544,1034932,1035141,1035143,1035145,1035163,1035386,1035410,1035508,1035515,1035552,1035718,1035795,1035841-1035842,1035890,1035920,1035924,1036132,1036213,1036303,1036310,1036631,1036692,1036767,1038222-1038859,1039957,1040005,1040027-1060619,1061067,1062011-1065960,1066305,1066970,1066986,1067079-1076024,1076696,1078925,1078940,1079069-1080836,1081580-1083021,1083043,1083084,1083234,1083902,1083951,1083958,1085460,1085509

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java Tue Apr  5 22:39:54 2011
@@ -91,12 +91,18 @@ public class TestListPathServlet {
     createFile("/a", 1);
     createFile("/b", 1);
     mkdirs("/dir");
+
+    checkFile(new Path("/a"));
+    checkFile(new Path("/b"));
     checkStatus("/");
 
     // A directory with files and directories
     createFile("/dir/.a.crc", 1);
     createFile("/dir/b", 1);
     mkdirs("/dir/dir1");
+    
+    checkFile(new Path("/dir/.a.crc"));
+    checkFile(new Path("/dir/b"));
     checkStatus("/dir");
 
     // Non existent path
@@ -159,4 +165,36 @@ public class TestListPathServlet {
           found);
     }
   }
+
+  private void checkFile(final Path f) throws IOException {
+    final Path hdfspath = fs.makeQualified(f);
+    final FileStatus hdfsstatus = fs.getFileStatus(hdfspath);
+    FileSystem.LOG.info("hdfspath=" + hdfspath);
+
+    final Path hftppath = hftpFs.makeQualified(f);
+    final FileStatus hftpstatus = hftpFs.getFileStatus(hftppath);
+    FileSystem.LOG.info("hftppath=" + hftppath);
+    
+    Assert.assertEquals(hdfspath.toUri().getPath(),
+        hdfsstatus.getPath().toUri().getPath());
+    checkFileStatus(hdfsstatus, hftpstatus);
+  }
+
+  private static void checkFileStatus(final FileStatus expected,
+      final FileStatus computed) {
+    Assert.assertEquals(expected.getPath().toUri().getPath(),
+        computed.getPath().toUri().getPath());
+
+// TODO: the test fails if the following assertions are uncommented.
+//    Assert.assertEquals(expected.getAccessTime(), computed.getAccessTime());
+//    Assert.assertEquals(expected.getModificationTime(),
+//        computed.getModificationTime());
+
+    Assert.assertEquals(expected.getBlockSize(), computed.getBlockSize());
+    Assert.assertEquals(expected.getGroup(), computed.getGroup());
+    Assert.assertEquals(expected.getLen(), computed.getLen());
+    Assert.assertEquals(expected.getOwner(), computed.getOwner());
+    Assert.assertEquals(expected.getPermission(), computed.getPermission());
+    Assert.assertEquals(expected.getReplication(), computed.getReplication());
+  }
 }

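checkFile() compares the same path as seen through two FileSystem views, so path, length, ownership and permissions must agree between HDFS and HFTP. A hypothetical setup sketch (the real test wires fs and hftpFs up elsewhere; the URI and port here are illustrative only):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);            // direct HDFS view
    FileSystem hftpFs = FileSystem.get(              // read-only HFTP view
        URI.create("hftp://localhost:50070"), conf);
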
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Tue Apr  5 22:39:54 2011
@@ -171,14 +171,6 @@ public class TestBackupNode extends Test
       //
       backup = startBackupNode(conf, op, 1);
       waitCheckpointDone(backup);
-
-      for (int i = 0; i < 10; i++) {
-        writeFile(fileSys, new Path("file_" + i), replication);
-      }
-
-      backup.doCheckpoint();
-      waitCheckpointDone(backup);
-
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);
       assertTrue(e.getLocalizedMessage(), false);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1089274&r1=1089273&r2=1089274&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Tue Apr  5 22:39:54 2011
@@ -30,15 +30,12 @@ import org.apache.hadoop.fs.permission.*
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-
-import org.apache.hadoop.util.StringUtils;
  
 import org.mockito.Mockito;
 
@@ -53,26 +50,6 @@ public class TestEditLog extends TestCas
   static final int NUM_TRANSACTIONS = 100;
   static final int NUM_THREADS = 100;
 
-  /** An edits log with 3 edits from 0.20 - the result of
-   * a fresh namesystem followed by hadoop fs -touchz /myfile */
-  static final byte[] HADOOP20_SOME_EDITS =
-    StringUtils.hexStringToByte((
-        "ffff ffed 0a00 0000 0000 03fa e100 0000" +
-        "0005 0007 2f6d 7966 696c 6500 0133 000d" +
-        "3132 3932 3331 3634 3034 3138 3400 0d31" +
-        "3239 3233 3136 3430 3431 3834 0009 3133" +
-        "3432 3137 3732 3800 0000 0004 746f 6464" +
-        "0a73 7570 6572 6772 6f75 7001 a400 1544" +
-        "4653 436c 6965 6e74 5f2d 3136 3136 3535" +
-        "3738 3931 000b 3137 322e 3239 2e35 2e33" +
-        "3209 0000 0005 0007 2f6d 7966 696c 6500" +
-        "0133 000d 3132 3932 3331 3634 3034 3138" +
-        "3400 0d31 3239 3233 3136 3430 3431 3834" +
-        "0009 3133 3432 3137 3732 3800 0000 0004" +
-        "746f 6464 0a73 7570 6572 6772 6f75 7001" +
-        "a4ff 0000 0000 0000 0000 0000 0000 0000"
-    ).replace(" ",""));
-
   //
   // an object that does a bunch of transactions
   //
@@ -104,51 +81,6 @@ public class TestEditLog extends TestCas
   }
 
   /**
-   * Test case for an empty edit log from a prior version of Hadoop.
-   */
-  public void testPreTxIdEditLogNoEdits() throws Exception {
-    FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
-    int numEdits = testLoad(
-        StringUtils.hexStringToByte("ffffffed"), // just version number
-        namesys);
-    assertEquals(0, numEdits);
-  }
-  
-  /**
-   * Test case for loading a very simple edit log from a format
-   * prior to the inclusion of edit transaction IDs in the log.
-   */
-  public void testPreTxidEditLogWithEdits() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
-      cluster.waitActive();
-      final FSNamesystem namesystem = cluster.getNamesystem();
-
-      int numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
-      assertEquals(3, numEdits);
-      // Sanity check the edit
-      HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
-      assertEquals("supergroup", fileInfo.getGroup());
-      assertEquals(3, fileInfo.getReplication());
-    } finally {
-      cluster.shutdown();
-    }
-  }
-  
-  private int testLoad(byte[] data, FSNamesystem namesys) throws IOException {
-    FSEditLogLoader loader = new FSEditLogLoader(namesys);
-    EditLogInputStream mockStream = Mockito.mock(EditLogInputStream.class);
-    ByteArrayInputStream bais = new ByteArrayInputStream(data);
-    Mockito.doReturn(new DataInputStream(bais))
-      .when(mockStream).getDataInputStream();
-    
-    return loader.loadFSEdits(mockStream, 1);
-  }
-
-  /**
    * Tests transaction logging in dfs.
    */
   public void testEditLog() throws IOException {
@@ -213,13 +145,13 @@ public class TestEditLog extends TestCas
       // If there were any corruptions, it is likely that the reading in
       // of these transactions will throw an exception.
       //
+      FSEditLogLoader loader = new FSEditLogLoader(namesystem);
       for (Iterator<StorageDirectory> it = 
               fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-        FSEditLogLoader loader = new FSEditLogLoader(namesystem);
         File editFile = NNStorage.getStorageFile(it.next(), NameNodeFile.EDITS);
         System.out.println("Verifying file: " + editFile);
         int numEdits = loader.loadFSEdits(
-            new EditLogFileInputStream(editFile), 1);
+                                  new EditLogFileInputStream(editFile));
         int numLeases = namesystem.leaseManager.countLease();
         System.out.println("Number of outstanding leases " + numLeases);
         assertEquals(0, numLeases);