Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/01/18 00:38:30 UTC

svn commit: r1434966 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/se...

Author: szetszwo
Date: Thu Jan 17 23:38:30 2013
New Revision: 1434966

URL: http://svn.apache.org/viewvc?rev=1434966&view=rev
Log:
HDFS-4098. Add FileWithSnapshot, INodeFileUnderConstructionWithSnapshot and INodeFileUnderConstructionSnapshot for supporting append to snapshotted files.

Added:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
      - copied, changed from r1434954, hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
Removed:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Thu Jan 17 23:38:30 2013
@@ -109,3 +109,7 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4407. Change INodeDirectoryWithSnapshot.Diff.combinePostDiff(..) to
   merge-sort like and keep the postDiff parameter unmodified.  (szetszwo)
+
+  HDFS-4098. Add FileWithSnapshot, INodeFileUnderConstructionWithSnapshot and
+  INodeFileUnderConstructionSnapshot for supporting append to snapshotted files.
+  (szetszwo)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Thu Jan 17 23:38:30 2013
@@ -324,7 +324,7 @@ public class FSEditLogLoader {
       if (oldFile.isUnderConstruction()) {
         INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
         fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
-        INodeFile newFile = ucFile.convertToInodeFile(ucFile.getModificationTime());
+        INodeFile newFile = ucFile.toINodeFile(ucFile.getModificationTime());
         fsDir.unprotectedReplaceINodeFile(addCloseOp.path, ucFile, newFile,
             iip.getLatestSnapshot());
       }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jan 17 23:38:30 2013
@@ -178,7 +178,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -1357,10 +1357,11 @@ public class FSNamesystem implements Nam
           doAccessTime = false;
         }
 
-        long now = now();
-        final INodesInPath iip = dir.getMutableINodesInPath(src);
+        final INodesInPath iip = dir.getINodesInPath(src);
         final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
-        if (doAccessTime && isAccessTimeSupported()) {
+        if (!iip.isSnapshot() //snapshots are readonly, so don't update atime.
+            && doAccessTime && isAccessTimeSupported()) {
+          final long now = now();
           if (now <= inode.getAccessTime() + getAccessTimePrecision()) {
             // if we have to set access time but we only have the readlock, then
             // restart this entire operation with the writeLock.
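
A simplified sketch of the access-time rule introduced in the hunk above, with hypothetical names (needsAccessTimeUpdate is not FSNamesystem code): snapshot paths are read-only and never receive an atime update, and a live file is only updated when its stored atime has aged past the configured precision, which bounds edit-log churn from hot files. Note also that the change defers the now() call until an update is actually possible.

    // Hypothetical helper; a hedged model of the check, not the real code.
    static boolean needsAccessTimeUpdate(boolean isSnapshotPath,
        boolean doAccessTime, boolean accessTimeSupported,
        long storedAtime, long precisionMs) {
      if (isSnapshotPath || !doAccessTime || !accessTimeSupported) {
        return false;  // snapshots are read-only; never touch their atime
      }
      final long now = System.currentTimeMillis();
      // update only when the stored atime is older than the precision window
      return now > storedAtime + precisionMs;
    }
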
@@ -1488,7 +1489,7 @@ public class FSNamesystem implements Nam
       throw new HadoopIllegalArgumentException("concat: target file "
           + target + " is empty");
     }
-    if (trgInode instanceof INodeFileWithLink) {
+    if (trgInode instanceof INodeFileWithSnapshot) {
       throw new HadoopIllegalArgumentException("concat: target file "
           + target + " is in a snapshot");
     }
@@ -1981,18 +1982,12 @@ public class FSNamesystem implements Nam
   LocatedBlock prepareFileForWrite(String src, INodeFile file,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
       boolean writeToEditLog, Snapshot latestSnapshot) throws IOException {
-    //TODO SNAPSHOT: INodeFileUnderConstruction with link
-    INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
-                                    file.getId(),
-                                    file.getLocalNameBytes(),
-                                    file.getFileReplication(),
-                                    file.getModificationTime(),
-                                    file.getPreferredBlockSize(),
-                                    file.getBlocks(),
-                                    file.getPermissionStatus(),
-                                    leaseHolder,
-                                    clientMachine,
-                                    clientNode);
+    if (latestSnapshot != null) {
+      file = (INodeFile)file.recordModification(latestSnapshot).left;
+    }
+    final INodeFileUnderConstruction cons = file.toUnderConstruction(
+        leaseHolder, clientMachine, clientNode);
+
     dir.replaceINodeFile(src, file, cons, latestSnapshot);
     leaseManager.addLease(cons.getClientName(), src);
     
@@ -3301,7 +3296,7 @@ public class FSNamesystem implements Nam
 
     // The file is no longer pending.
     // Create permanent INode, update blocks
-    INodeFile newFile = pendingFile.convertToInodeFile(now());
+    final INodeFile newFile = pendingFile.toINodeFile(now());
     dir.replaceINodeFile(src, pendingFile, newFile, latestSnapshot);
 
     // close file and persist block allocations for this file
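
Taken together, the FSNamesystem hunks above move construction-state changes into the inode classes themselves. A hedged sketch of the resulting append path, using the types from this commit but hypothetical glue code (prepareForAppend is illustrative only):

    static INodeFileUnderConstruction prepareForAppend(INodeFile file,
        Snapshot latest, String leaseHolder, String clientMachine,
        DatanodeDescriptor clientNode) {
      if (latest != null) {
        // copy-on-write: record the pre-append state under the latest snapshot
        file = (INodeFile) file.recordModification(latest).left;
      }
      // a plain INodeFile yields a plain INodeFileUnderConstruction, while an
      // INodeFileWithSnapshot yields an INodeFileUnderConstructionWithSnapshot
      // that replaces it in the circular list (see toUnderConstruction below)
      return file.toUnderConstruction(leaseHolder, clientMachine, clientNode);
    }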

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Thu Jan 17 23:38:30 2013
@@ -508,7 +508,7 @@ public abstract class INode implements C
   /**
    * Set last access time of inode.
    */
-  INode setAccessTime(long atime, Snapshot latest) {
+  public INode setAccessTime(long atime, Snapshot latest) {
     Pair<? extends INode, ? extends INode> pair = recordModification(latest);
     INode nodeToUpdate = pair != null ? pair.left : this;    
     nodeToUpdate.accessTime = atime;
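
The Pair returned by recordModification drives the copy-on-write idiom used throughout this branch. A minimal self-contained model with a hypothetical CowNode class, assuming the left element of the pair is the copy to mutate and a null pair means no copy was needed:

    class CowNode {
      long accessTime;

      // Stub standing in for recordModification(latest): return the node to
      // mutate (a fresh copy when a snapshot must be preserved), or null
      // when this node can be mutated in place.
      CowNode recordModification(Object latestSnapshot) {
        return null;
      }

      void setAccessTime(long atime, Object latestSnapshot) {
        final CowNode copy = recordModification(latestSnapshot);
        final CowNode nodeToUpdate = copy != null ? copy : this;
        nodeToUpdate.accessTime = atime;  // the snapshot copy keeps the old value
      }
    }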

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Thu Jan 17 23:38:30 2013
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -216,13 +216,13 @@ public class INodeDirectory extends INod
     oldDir.setParent(null);
   }
 
-  /** Replace a child {@link INodeFile} with an {@link INodeFileWithLink}. */
-  INodeFileWithLink replaceINodeFile(final INodeFile child) {
+  /** Replace a child {@link INodeFile} with an {@link INodeFileWithSnapshot}. */
+  INodeFileWithSnapshot replaceINodeFile(final INodeFile child) {
     assertChildrenNonNull();
-    Preconditions.checkArgument(!(child instanceof INodeFileWithLink),
+    Preconditions.checkArgument(!(child instanceof INodeFileWithSnapshot),
         "Child file is already an INodeFileWithLink, child=" + child);
 
-    final INodeFileWithLink newChild = new INodeFileWithLink(child);
+    final INodeFileWithSnapshot newChild = new INodeFileWithSnapshot(child);
     final int i = searchChildrenForExistingINode(newChild);
     children.set(i, newChild);
     return newChild;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Jan 17 23:38:30 2013
@@ -28,10 +28,11 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
+import com.google.common.base.Preconditions;
+
 /** I-node for closed file. */
 @InterfaceAudience.Private
 public class INodeFile extends INode implements BlockCollection {
@@ -111,7 +112,7 @@ public class INodeFile extends INode imp
   }
 
   @Override
-  public Pair<INodeFileWithLink, INodeFileSnapshot> createSnapshotCopy() {
+  public Pair<? extends INodeFile, ? extends INodeFile> createSnapshotCopy() {
     return parent.replaceINodeFile(this).createSnapshotCopy();
   }
 
@@ -121,6 +122,17 @@ public class INodeFile extends INode imp
     return true;
   }
 
+  /** Convert this file to an {@link INodeFileUnderConstruction}. */
+  public INodeFileUnderConstruction toUnderConstruction(
+      String clientName,
+      String clientMachine,
+      DatanodeDescriptor clientNode) {
+    Preconditions.checkArgument(!(this instanceof INodeFileUnderConstruction),
+        "file is already an INodeFileUnderConstruction");
+    return new INodeFileUnderConstruction(this,
+        clientName, clientMachine, clientNode); 
+  }
+
   /**
    * Set the {@link FsPermission} of this {@link INodeFile}.
    * Since this is a file,
@@ -141,7 +153,7 @@ public class INodeFile extends INode imp
     return getFileReplication();
   }
 
-  protected void setFileReplication(short replication, Snapshot latest) {
+  public void setFileReplication(short replication, Snapshot latest) {
     if (latest != null) {
       final Pair<? extends INode, ? extends INode> p = recordModification(latest);
       if (p != null) {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Thu Jan 17 23:38:30 2013
@@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
+import com.google.common.base.Preconditions;
+
 /**
  * I-node for file being written.
  */
@@ -77,8 +79,18 @@ public class INodeFileUnderConstruction 
     this.clientMachine = clientMachine;
     this.clientNode = clientNode;
   }
+  
+  protected INodeFileUnderConstruction(final INodeFile that,
+      final String clientName,
+      final String clientMachine,
+      final DatanodeDescriptor clientNode) {
+    super(that);
+    this.clientName = clientName;
+    this.clientMachine = clientMachine;
+    this.clientNode = clientNode;
+  }
 
-  String getClientName() {
+  public String getClientName() {
     return clientName;
   }
 
@@ -86,11 +98,11 @@ public class INodeFileUnderConstruction 
     this.clientName = clientName;
   }
 
-  String getClientMachine() {
+  public String getClientMachine() {
     return clientMachine;
   }
 
-  DatanodeDescriptor getClientNode() {
+  public DatanodeDescriptor getClientNode() {
     return clientNode;
   }
 
@@ -102,30 +114,27 @@ public class INodeFileUnderConstruction 
     return true;
   }
 
-  //
-  // converts a INodeFileUnderConstruction into a INodeFile
-  // use the modification time as the access time
-  //
-  INodeFile convertToInodeFile(long mtime) {
-    assert allBlocksComplete() : "Can't finalize inode " + this
-      + " since it contains non-complete blocks! Blocks are "
-      + Arrays.asList(getBlocks());
-    //TODO SNAPSHOT: may convert to INodeFileWithLink
+  /**
+   * Converts an INodeFileUnderConstruction to an INodeFile.
+   * The original modification time is used as the access time.
+   * The new modification time is the specified mtime.
+   */
+  protected INodeFile toINodeFile(long mtime) {
+    assertAllBlocksComplete();
+
     return new INodeFile(getId(), getLocalNameBytes(), getPermissionStatus(),
         mtime, getModificationTime(),
         getBlocks(), getFileReplication(), getPreferredBlockSize());
   }
   
-  /**
-   * @return true if all of the blocks in this file are marked as completed.
-   */
-  private boolean allBlocksComplete() {
-    for (BlockInfo b : getBlocks()) {
-      if (!b.isComplete()) {
-        return false;
-      }
+  /** Assert all blocks are complete. */
+  protected void assertAllBlocksComplete() {
+    final BlockInfo[] blocks = getBlocks();
+    for (int i = 0; i < blocks.length; i++) {
+      Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
+          + " %s %s since blocks[%s] is non-complete, where blocks=%s.",
+          getClass().getSimpleName(), this, i, Arrays.asList(getBlocks()));
     }
-    return true;
   }
 
   /**
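
The rewritten check trades a plain assert for Guava's Preconditions.checkState, whose %s template is formatted only on failure, so no message string is built on the common success path. A self-contained sketch of the same idiom, with hypothetical names (BlockCheck, assertAllComplete):

    import com.google.common.base.Preconditions;

    class BlockCheck {
      // Throws IllegalStateException naming the first non-complete block.
      static void assertAllComplete(boolean[] complete, String fileName) {
        for (int i = 0; i < complete.length; i++) {
          Preconditions.checkState(complete[i],
              "Failed to finalize %s since blocks[%s] is non-complete.",
              fileName, i);
        }
      }
    }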

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java?rev=1434966&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java (added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java Thu Jan 17 23:38:30 2013
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapINodeUpdateEntry;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * {@link INodeFile} with a link to the next element.
+ * The links of all the snapshot files and the original file form a circular
+ * linked list so that all elements are accessible from any of them.
+ */
+@InterfaceAudience.Private
+public interface FileWithSnapshot {
+  /** @return the {@link INodeFile} view of this object. */
+  public INodeFile asINodeFile();
+  
+  /** @return the next element. */
+  public FileWithSnapshot getNext();
+
+  /** Set the next element. */
+  public void setNext(FileWithSnapshot next);
+  
+  /** Insert inode to the circular linked list. */
+  public void insert(FileWithSnapshot inode);
+  
+  /** Utility methods for the classes which implement the interface. */
+  static class Util {
+
+    /** Replace the old file with the new file in the circular linked list. */
+    static void replace(FileWithSnapshot oldFile, FileWithSnapshot newFile) {
+      //set next element
+      FileWithSnapshot i = oldFile.getNext();
+      newFile.setNext(i);
+      oldFile.setNext(null);
+      //find previous element and update it
+      for(; i.getNext() != oldFile; i = i.getNext());
+      i.setNext(newFile);
+    }
+
+    /**
+     * @return the max file replication of the elements
+     *         in the circular linked list.
+     */
+    static short getBlockReplication(final FileWithSnapshot file) {
+      short max = file.asINodeFile().getFileReplication();
+      // i may be null since next will be set to null when the INode is deleted
+      for(FileWithSnapshot i = file.getNext();
+          i != file && i != null;
+          i = i.getNext()) {
+        final short replication = i.asINodeFile().getFileReplication();
+        if (replication > max) {
+          max = replication;
+        }
+      }
+      return max;
+    }
+
+    /**
+     * Remove the current inode from the circular linked list.
+     * If some blocks at the end of the block list no longer belong to
+     * any other inode, collect them and update the block list.
+     */
+    static int collectSubtreeBlocksAndClear(final FileWithSnapshot file,
+        final BlocksMapUpdateInfo info) {
+      final FileWithSnapshot next = file.getNext();
+      Preconditions.checkState(next != file, "this is the only remaining inode.");
+
+      // There are other inode(s) using the blocks.
+      // Compute max file size excluding this and find the last inode.
+      long max = next.asINodeFile().computeFileSize(true);
+      short maxReplication = next.asINodeFile().getFileReplication();
+      FileWithSnapshot last = next;
+      for(FileWithSnapshot i = next.getNext(); i != file; i = i.getNext()) {
+        final long size = i.asINodeFile().computeFileSize(true);
+        if (size > max) {
+          max = size;
+        }
+        final short rep = i.asINodeFile().getFileReplication();
+        if (rep > maxReplication) {
+          maxReplication = rep;
+        }
+        last = i;
+      }
+
+      collectBlocksBeyondMaxAndClear(file, max, info);
+      
+      // remove this from the circular linked list.
+      last.setNext(next);
+      // Set the replication of the current INode to the max of all the other
+      // linked INodes, so that in case the current INode is retrieved from the
+      // blocksMap before it is removed or updated, the correct replication
+      // number can be retrieved.
+      file.asINodeFile().setFileReplication(maxReplication, null);
+      file.setNext(null);
+      // clear parent
+      file.asINodeFile().setParent(null);
+      return 1;
+    }
+
+    static void collectBlocksBeyondMaxAndClear(final FileWithSnapshot file,
+            final long max, final BlocksMapUpdateInfo info) {
+      final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks();
+      if (oldBlocks != null) {
+        //find the minimum n such that the size of the first n blocks >= max
+        int n = 0;
+        for(long size = 0; n < oldBlocks.length && max > size; n++) {
+          size += oldBlocks[n].getNumBytes();
+        }
+
+        // Replace the INode for all the remaining blocks in blocksMap
+        final FileWithSnapshot next = file.getNext();
+        final BlocksMapINodeUpdateEntry entry = new BlocksMapINodeUpdateEntry(
+            file.asINodeFile(), next.asINodeFile());
+        if (info != null) {
+          for (int i = 0; i < n; i++) {
+            info.addUpdateBlock(oldBlocks[i], entry);
+          }
+        }
+        
+        // starting from block n, the data is beyond max.
+        if (n < oldBlocks.length) {
+          // resize the array.  
+          final BlockInfo[] newBlocks;
+          if (n == 0) {
+            newBlocks = null;
+          } else {
+            newBlocks = new BlockInfo[n];
+            System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
+          }
+          for(FileWithSnapshot i = next; i != file; i = i.getNext()) {
+            i.asINodeFile().setBlocks(newBlocks);
+          }
+
+          // collect the blocks beyond max.  
+          if (info != null) {
+            for(; n < oldBlocks.length; n++) {
+              info.addDeleteBlock(oldBlocks[n]);
+            }
+          }
+        }
+        file.asINodeFile().setBlocks(null);
+      }
+    }
+  }
+}
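
A minimal standalone model of the circular list this interface describes, using a hypothetical RingNode class rather than NameNode types: the original file and its snapshot copies all point "next" around a ring, so every element can reach every other without a head pointer.

    final class RingNode {
      short replication;
      RingNode next = this;  // a single node forms a ring of one

      // Mirrors FileWithSnapshot#insert: splice a node in after this one.
      void insert(RingNode node) {
        node.next = this.next;
        this.next = node;
      }

      // Mirrors Util#replace; assumes the ring has at least two nodes,
      // as it does whenever a snapshot copy exists.
      void replaceWith(RingNode newNode) {
        RingNode i = this.next;
        newNode.next = i;
        this.next = null;  // detach the node being replaced
        while (i.next != this) {
          i = i.next;      // walk the ring to find the predecessor
        }
        i.next = newNode;
      }

      // Mirrors Util#getBlockReplication: the effective replication is the
      // maximum over the whole ring (next may be null after deletion).
      short maxReplication() {
        short max = replication;
        for (RingNode i = next; i != this && i != null; i = i.next) {
          if (i.replication > max) {
            max = i.replication;
          }
        }
        return max;
      }
    }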

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java Thu Jan 17 23:38:30 2013
@@ -23,11 +23,11 @@ import org.apache.hadoop.classification.
  *  INode representing a snapshot of a file.
  */
 @InterfaceAudience.Private
-public class INodeFileSnapshot extends INodeFileWithLink {
+public class INodeFileSnapshot extends INodeFileWithSnapshot {
   /** The file size at snapshot creation time. */
   final long size;
 
-  INodeFileSnapshot(INodeFileWithLink f) {
+  INodeFileSnapshot(INodeFileWithSnapshot f) {
     super(f);
     this.size = f.computeFileSize(true);
     f.insert(this);

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java?rev=1434966&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java (added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java Thu Jan 17 23:38:30 2013
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
+
+/**
+ *  INode representing a snapshot of an {@link INodeFileUnderConstruction}.
+ */
+@InterfaceAudience.Private
+public class INodeFileUnderConstructionSnapshot
+    extends INodeFileUnderConstructionWithSnapshot {
+  /** The file size at snapshot creation time. */
+  final long size;
+
+  INodeFileUnderConstructionSnapshot(INodeFileUnderConstructionWithSnapshot f) {
+    super(f, f.getClientName(), f.getClientMachine(), f.getClientNode());
+    this.size = f.computeFileSize(true);
+    f.insert(this);
+  }
+
+  @Override
+  public long computeFileSize(boolean includesBlockInfoUnderConstruction) {
+    //ignore includesBlockInfoUnderConstruction
+    //since files in a snapshot are considered closed.
+    return size;
+  }
+}
\ No newline at end of file
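
The size captured in the constructor freezes the file length: later appends to the live file never change what the snapshot reports. A minimal illustration of the pattern with a hypothetical class:

    class FrozenLength {
      private final long size;

      FrozenLength(long liveFileSize) {
        this.size = liveFileSize;  // captured once, at snapshot creation
      }

      long computeFileSize(boolean includeUnderConstruction) {
        return size;  // the flag is ignored: snapshot files act as closed
      }
    }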

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java?rev=1434966&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java (added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java Thu Jan 17 23:38:30 2013
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
+
+/**
+ * Represents an {@link INodeFileUnderConstruction} that is snapshotted.
+ * Note that snapshot files are represented by
+ * {@link INodeFileUnderConstructionSnapshot}.
+ */
+@InterfaceAudience.Private
+public class INodeFileUnderConstructionWithSnapshot
+    extends INodeFileUnderConstruction implements FileWithSnapshot {
+  private FileWithSnapshot next;
+
+  INodeFileUnderConstructionWithSnapshot(final FileWithSnapshot f,
+      final String clientName,
+      final String clientMachine,
+      final DatanodeDescriptor clientNode) {
+    super(f.asINodeFile(), clientName, clientMachine, clientNode);
+  }
+
+  @Override
+  protected INodeFileWithSnapshot toINodeFile(final long mtime) {
+    assertAllBlocksComplete();
+    final long atime = getModificationTime();
+    final INodeFileWithSnapshot f = new INodeFileWithSnapshot(this);
+    f.setModificationTime(mtime, null);
+    f.setAccessTime(atime, null);
+    Util.replace(this, f);
+    return f;
+  }
+
+  @Override
+  public Pair<? extends INodeFileUnderConstruction,
+      INodeFileUnderConstructionSnapshot> createSnapshotCopy() {
+    return new Pair<INodeFileUnderConstructionWithSnapshot,
+        INodeFileUnderConstructionSnapshot>(
+            this, new INodeFileUnderConstructionSnapshot(this));
+  }
+
+  @Override
+  public INodeFile asINodeFile() {
+    return this;
+  }
+
+  @Override
+  public FileWithSnapshot getNext() {
+    return next;
+  }
+
+  @Override
+  public void setNext(FileWithSnapshot next) {
+    this.next = next;
+  }
+
+  @Override
+  public void insert(FileWithSnapshot inode) {
+    inode.setNext(this.getNext());
+    this.setNext(inode);
+  }
+
+  @Override
+  public short getBlockReplication() {
+    return Util.getBlockReplication(this);
+  }
+
+  @Override
+  public int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
+    if (next == null || next == this) {
+      // this is the only remaining inode.
+      return super.collectSubtreeBlocksAndClear(info);
+    } else {
+      return Util.collectSubtreeBlocksAndClear(this, info);
+    }
+  }
+}

Copied: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (from r1434954, hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?p2=hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java&p1=hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java&r1=1434954&r2=1434966&rev=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Thu Jan 17 23:38:30 2013
@@ -18,149 +18,75 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util;
 
 /**
- * INodeFile with a link to the next element.
- * This class is used to represent the original file that is snapshotted.
- * The snapshot files are represented by {@link INodeFileSnapshot}.
- * The link of all the snapshot files and the original file form a circular
- * linked list so that all elements are accessible by any of the elements.
+ * Represents an {@link INodeFile} that is snapshotted.
+ * Note that snapshot files are represented by {@link INodeFileSnapshot}.
  */
 @InterfaceAudience.Private
-public class INodeFileWithLink extends INodeFile {
-  private INodeFileWithLink next;
+public class INodeFileWithSnapshot extends INodeFile
+    implements FileWithSnapshot {
+  private FileWithSnapshot next;
 
-  public INodeFileWithLink(INodeFile f) {
+  public INodeFileWithSnapshot(INodeFile f) {
     super(f);
-    next = this;
+    setNext(this);
   }
 
   @Override
-  public Pair<INodeFileWithLink, INodeFileSnapshot> createSnapshotCopy() {
-    return new Pair<INodeFileWithLink, INodeFileSnapshot>(this,
+  public INodeFileUnderConstructionWithSnapshot toUnderConstruction(
+      final String clientName,
+      final String clientMachine,
+      final DatanodeDescriptor clientNode) {
+    final INodeFileUnderConstructionWithSnapshot f
+        = new INodeFileUnderConstructionWithSnapshot(this,
+            clientName, clientMachine, clientNode);
+    Util.replace(this, f);
+    return f;
+  }
+
+  @Override
+  public Pair<INodeFileWithSnapshot, INodeFileSnapshot> createSnapshotCopy() {
+    return new Pair<INodeFileWithSnapshot, INodeFileSnapshot>(this,
         new INodeFileSnapshot(this));
   }
 
-  void setNext(INodeFileWithLink next) {
-    this.next = next;
+  @Override
+  public INodeFile asINodeFile() {
+    return this;
   }
 
-  INodeFileWithLink getNext() {
+  @Override
+  public FileWithSnapshot getNext() {
     return next;
   }
-  
-  /** Insert inode to the circular linked list. */
-  void insert(INodeFileWithLink inode) {
+
+  @Override
+  public void setNext(FileWithSnapshot next) {
+    this.next = next;
+  }
+
+  @Override
+  public void insert(FileWithSnapshot inode) {
     inode.setNext(this.getNext());
     this.setNext(inode);
   }
 
-  /**
-   * @return the max file replication of the elements
-   *         in the circular linked list.
-   */
   @Override
   public short getBlockReplication() {
-    short max = getFileReplication();
-    // i may be null since next will be set to null when the INode is deleted
-    for(INodeFileWithLink i = next; i != this && i != null; i = i.getNext()) {
-      final short replication = i.getFileReplication();
-      if (replication > max) {
-        max = replication;
-      }
-    }
-    return max;
+    return Util.getBlockReplication(this);
   }
 
-  /**
-   * {@inheritDoc}
-   * 
-   * Remove the current inode from the circular linked list.
-   * If some blocks at the end of the block list no longer belongs to
-   * any other inode, collect them and update the block list.
-   */
   @Override
   public int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
-    if (next == this) {
+    if (next == null || next == this) {
       // this is the only remaining inode.
-      super.collectSubtreeBlocksAndClear(info);
+      return super.collectSubtreeBlocksAndClear(info);
     } else {
-      // There are other inode(s) using the blocks.
-      // Compute max file size excluding this and find the last inode. 
-      long max = next.computeFileSize(true);
-      short maxReplication = next.getFileReplication();
-      INodeFileWithLink last = next;
-      for(INodeFileWithLink i = next.getNext(); i != this; i = i.getNext()) {
-        final long size = i.computeFileSize(true);
-        if (size > max) {
-          max = size;
-        }
-        final short rep = i.getFileReplication();
-        if (rep > maxReplication) {
-          maxReplication = rep;
-        }
-        last = i;
-      }
-
-      collectBlocksBeyondMaxAndClear(max, info);
-      
-      // remove this from the circular linked list.
-      last.next = this.next;
-      // Set the replication of the current INode to the max of all the other
-      // linked INodes, so that in case the current INode is retrieved from the
-      // blocksMap before it is removed or updated, the correct replication
-      // number can be retrieved.
-      this.setFileReplication(maxReplication, null);
-      this.next = null;
-      // clear parent
-      setParent(null);
-    }
-    return 1;
-  }
-
-  private void collectBlocksBeyondMaxAndClear(final long max,
-      final BlocksMapUpdateInfo info) {
-    final BlockInfo[] oldBlocks = getBlocks();
-    if (oldBlocks != null) {
-      //find the minimum n such that the size of the first n blocks > max
-      int n = 0;
-      for(long size = 0; n < oldBlocks.length && max > size; n++) {
-        size += oldBlocks[n].getNumBytes();
-      }
-
-      // Replace the INode for all the remaining blocks in blocksMap
-      BlocksMapINodeUpdateEntry entry = new BlocksMapINodeUpdateEntry(this,
-          next);
-      if (info != null) {
-        for (int i = 0; i < n; i++) {
-          info.addUpdateBlock(oldBlocks[i], entry);
-        }
-      }
-      
-      // starting from block n, the data is beyond max.
-      if (n < oldBlocks.length) {
-        // resize the array.  
-        final BlockInfo[] newBlocks;
-        if (n == 0) {
-          newBlocks = null;
-        } else {
-          newBlocks = new BlockInfo[n];
-          System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
-        }
-        for(INodeFileWithLink i = next; i != this; i = i.getNext()) {
-          i.setBlocks(newBlocks);
-        }
-
-        // collect the blocks beyond max.  
-        if (info != null) {
-          for(; n < oldBlocks.length; n++) {
-            info.addDeleteBlock(oldBlocks[n]);
-          }
-        }
-      }
-      setBlocks(null);
+      return Util.collectSubtreeBlocksAndClear(this, info);
     }
   }
 }
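
The block-truncation arithmetic removed here now lives in FileWithSnapshot.Util.collectBlocksBeyondMaxAndClear. Its core is a prefix-sum cut-off; a hedged sketch with a hypothetical helper name:

    // Keep the minimum prefix of blocks whose total size covers max bytes;
    // everything from index n onward can be deleted or reassigned.
    static int blocksToKeep(long[] blockSizes, long max) {
      int n = 0;
      long size = 0;
      while (n < blockSizes.length && size < max) {
        size += blockSizes[n++];
      }
      return n;
    }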

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Thu Jan 17 23:38:30 2013
@@ -364,8 +364,7 @@ public class TestSnapshotPathINodes {
    * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} 
    * for snapshot file while modifying file after snapshot.
    */
-//  TODO: disable it temporarily since it uses append.
-//  @Test
+  @Test
   public void testSnapshotPathINodesAfterModification() throws Exception {
     //file1 was deleted, create it again.
     DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Thu Jan 17 23:38:30 2013
@@ -270,10 +270,9 @@ public class TestSnapshot {
       Modification delete = new FileDeletion(
           node.fileList.get((node.nullFileIndex + 1) % node.fileList.size()),
           hdfs);
-//      TODO: fix append for snapshots
-//      Modification append = new FileAppend(
-//          node.fileList.get((node.nullFileIndex + 2) % node.fileList.size()),
-//          hdfs, (int) BLOCKSIZE);
+      Modification append = new FileAppend(
+          node.fileList.get((node.nullFileIndex + 2) % node.fileList.size()),
+          hdfs, (int) BLOCKSIZE);
       Modification chmod = new FileChangePermission(
           node.fileList.get((node.nullFileIndex + 3) % node.fileList.size()),
           hdfs, genRandomPermission());
@@ -290,8 +289,7 @@ public class TestSnapshot {
       
       mList.add(create);
       mList.add(delete);
-      // TODO: fix append for snapshots
-//      mList.add(append);
+      mList.add(append);
       mList.add(chmod);
       mList.add(chown);
       mList.add(replication);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java?rev=1434966&r1=1434965&r2=1434966&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java Thu Jan 17 23:38:30 2013
@@ -39,7 +39,7 @@ import org.junit.Test;
 /**
  * This class tests the replication handling/calculation of snapshots. In
  * particular, {@link INodeFile#getFileReplication()} and
- * {@link INodeFileWithLink#getBlockReplication()} are tested to make sure
+ * {@link INodeFileWithSnapshot#getBlockReplication()} are tested to make sure
  * the number of replication is calculated correctly with/without snapshots.
  */
 public class TestSnapshotReplication {
@@ -132,7 +132,7 @@ public class TestSnapshotReplication {
    *          INodes
    * @param expectedBlockRep
    *          The expected replication number that should be returned by
-   *          {@link INodeFileWithLink#getBlockReplication()} of all the INodes
+   *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
    * @throws Exception
    */
   private void checkSnapshotFileReplication(Path currentFile,