Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/11/26 19:33:22 UTC

svn commit: r1545768 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/ src/test/java/...

Author: szetszwo
Date: Tue Nov 26 18:33:22 2013
New Revision: 1545768

URL: http://svn.apache.org/r1545768
Log:
HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota with DirectoryWithQuotaFeature.
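
The refactoring replaces an inheritance-based design (INodeDirectoryWithQuota extends INodeDirectory) with composition: quota state now lives in a DirectoryWithQuotaFeature object attached to a plain INodeDirectory through a small per-inode feature list. The following is a minimal, self-contained sketch of that pattern only; the class and method names are simplified stand-ins, not the actual HDFS API.

    import java.util.ArrayList;
    import java.util.List;

    class Directory {
      // Simplified: the real patch threads features through a singly linked
      // list (headFeature/nextFeature); a List is used here for brevity.
      private final List<Object> features = new ArrayList<Object>();

      void addFeature(Object f) {
        features.add(f);
      }

      <T> T getFeature(Class<T> type) {
        for (Object f : features) {
          if (type.isInstance(f)) {
            return type.cast(f);
          }
        }
        return null;
      }
    }

    class QuotaFeature {
      final long nsQuota;
      final long dsQuota;

      QuotaFeature(long nsQuota, long dsQuota) {
        this.nsQuota = nsQuota;
        this.dsQuota = dsQuota;
      }
    }

    class FeatureSketch {
      public static void main(String[] args) {
        Directory dir = new Directory();
        // A directory only pays for quota state when a quota is actually set.
        dir.addFeature(new QuotaFeature(Long.MAX_VALUE, -1L));
        QuotaFeature q = dir.getFeature(QuotaFeature.class);
        System.out.println(q != null
            ? "nsQuota=" + q.nsQuota + ", dsQuota=" + q.dsQuota
            : "no quota set");
      }
    }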

Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
      - copied, changed from r1545763, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
Removed:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Nov 26 18:33:22 2013
@@ -215,6 +215,9 @@ Trunk (Unreleased)
     HDFS-5538. URLConnectionFactory should pick up the SSL related configuration 
     by default. (Haohui Mai via jing9)
 
+    HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota
+    with DirectoryWithQuotaFeature.  (szetszwo)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 

Copied: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java (from r1545763, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java)
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java?p2=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java&r1=1545763&r2=1545768&rev=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java Tue Nov 26 18:33:22 2013
@@ -17,121 +17,76 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
- * Directory INode class that has a quota restriction
+ * Quota feature for {@link INodeDirectory}. 
  */
-public class INodeDirectoryWithQuota extends INodeDirectory {
+public final class DirectoryWithQuotaFeature extends INodeDirectory.Feature {
+  public static final long DEFAULT_NAMESPACE_QUOTA = Long.MAX_VALUE;
+  public static final long DEFAULT_DISKSPACE_QUOTA = HdfsConstants.QUOTA_RESET;
+
   /** Name space quota */
-  private long nsQuota = Long.MAX_VALUE;
+  private long nsQuota = DEFAULT_NAMESPACE_QUOTA;
   /** Name space count */
   private long namespace = 1L;
   /** Disk space quota */
-  private long dsQuota = HdfsConstants.QUOTA_RESET;
+  private long dsQuota = DEFAULT_DISKSPACE_QUOTA;
   /** Disk space count */
   private long diskspace = 0L;
   
-  /** Convert an existing directory inode to one with the given quota
-   * 
-   * @param nsQuota Namespace quota to be assigned to this inode
-   * @param dsQuota Diskspace quota to be assigned to this indoe
-   * @param other The other inode from which all other properties are copied
-   */
-  INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
-      long nsQuota, long dsQuota) {
-    super(other, adopt);
-    final Quota.Counts counts = other.computeQuotaUsage();
-    this.namespace = counts.get(Quota.NAMESPACE);
-    this.diskspace = counts.get(Quota.DISKSPACE);
+  DirectoryWithQuotaFeature(long nsQuota, long dsQuota) {
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
   }
-  
-  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
-      Quota.Counts quota) {
-    this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE));
-  }
 
-  /** constructor with no quota verification */
-  INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions,
-      long modificationTime, long nsQuota, long dsQuota) {
-    super(id, name, permissions, modificationTime);
-    this.nsQuota = nsQuota;
-    this.dsQuota = dsQuota;
-  }
-  
-  /** constructor with no quota verification */
-  INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions) {
-    super(id, name, permissions, 0L);
-  }
-  
-  @Override
-  public Quota.Counts getQuotaCounts() {
+  /** @return the quota set or -1 if it is not set. */
+  Quota.Counts getQuota() {
     return Quota.Counts.newInstance(nsQuota, dsQuota);
   }
   
   /** Set this directory's quota
    * 
    * @param nsQuota Namespace quota to be set
-   * @param dsQuota diskspace quota to be set
+   * @param dsQuota Diskspace quota to be set
    */
-  public void setQuota(long nsQuota, long dsQuota) {
+  void setQuota(long nsQuota, long dsQuota) {
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
   }
   
-  @Override
-  public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
-      int lastSnapshotId) {
-    if (useCache && isQuotaSet()) {
-      // use cache value
-      counts.add(Quota.NAMESPACE, namespace);
-      counts.add(Quota.DISKSPACE, diskspace);
-    } else {
-      super.computeQuotaUsage(counts, false, lastSnapshotId);
-    }
+  Quota.Counts addNamespaceDiskspace(Quota.Counts counts) {
+    counts.add(Quota.NAMESPACE, namespace);
+    counts.add(Quota.DISKSPACE, diskspace);
     return counts;
   }
 
-  @Override
-  public ContentSummaryComputationContext computeContentSummary(
+  ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
       final ContentSummaryComputationContext summary) {
     final long original = summary.getCounts().get(Content.DISKSPACE);
     long oldYieldCount = summary.getYieldCount();
-    super.computeContentSummary(summary);
+    dir.computeDirectoryContentSummary(summary);
     // Check only when the content has not changed in the middle.
     if (oldYieldCount == summary.getYieldCount()) {
-      checkDiskspace(summary.getCounts().get(Content.DISKSPACE) - original);
+      checkDiskspace(dir, summary.getCounts().get(Content.DISKSPACE) - original);
     }
     return summary;
   }
   
-  private void checkDiskspace(final long computed) {
-    if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) {
+  private void checkDiskspace(final INodeDirectory dir, final long computed) {
+    if (-1 != getQuota().get(Quota.DISKSPACE) && diskspace != computed) {
       NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
-          + getFullPathName() + ". Cached = " + diskspace
+          + dir.getFullPathName() + ". Cached = " + diskspace
           + " != Computed = " + computed);
     }
   }
 
-  /** Get the number of names in the subtree rooted at this directory
-   * @return the size of the subtree rooted at this directory
-   */
-  long numItemsInTree() {
-    return namespace;
-  }
-  
-  @Override
-  public final void addSpaceConsumed(final long nsDelta, final long dsDelta,
-      boolean verify) throws QuotaExceededException {
-    if (isQuotaSet()) { 
+  void addSpaceConsumed(final INodeDirectory dir, final long nsDelta,
+      final long dsDelta, boolean verify) throws QuotaExceededException {
+    if (dir.isQuotaSet()) { 
       // The following steps are important: 
       // check quotas in this inode and all ancestors before changing counts
       // so that no change is made if there is any quota violation.
@@ -141,11 +96,11 @@ public class INodeDirectoryWithQuota ext
         verifyQuota(nsDelta, dsDelta);
       }
       // (2) verify quota and then add count in ancestors 
-      super.addSpaceConsumed(nsDelta, dsDelta, verify);
+      dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
       // (3) add count in this inode
       addSpaceConsumed2Cache(nsDelta, dsDelta);
     } else {
-      super.addSpaceConsumed(nsDelta, dsDelta, verify);
+      dir.addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
     }
   }
   
@@ -154,7 +109,7 @@ public class INodeDirectoryWithQuota ext
    * @param nsDelta the change of the tree size
    * @param dsDelta change to disk space occupied
    */
-  protected void addSpaceConsumed2Cache(long nsDelta, long dsDelta) {
+  public void addSpaceConsumed2Cache(long nsDelta, long dsDelta) {
     namespace += nsDelta;
     diskspace += dsDelta;
   }
@@ -172,41 +127,42 @@ public class INodeDirectoryWithQuota ext
     this.diskspace = diskspace;
   }
   
+  /** @return the namespace and diskspace consumed. */
+  public Quota.Counts getSpaceConsumed() {
+    return Quota.Counts.newInstance(namespace, diskspace);
+  }
+
   /** Verify if the namespace quota is violated after applying delta. */
-  void verifyNamespaceQuota(long delta) throws NSQuotaExceededException {
+  private void verifyNamespaceQuota(long delta) throws NSQuotaExceededException {
     if (Quota.isViolated(nsQuota, namespace, delta)) {
       throw new NSQuotaExceededException(nsQuota, namespace + delta);
     }
   }
+  /** Verify if the diskspace quota is violated after applying delta. */
+  private void verifyDiskspaceQuota(long delta) throws DSQuotaExceededException {
+    if (Quota.isViolated(dsQuota, diskspace, delta)) {
+      throw new DSQuotaExceededException(dsQuota, diskspace + delta);
+    }
+  }
 
-  /** Verify if the namespace count disk space satisfies the quota restriction 
-   * @throws QuotaExceededException if the given quota is less than the count
+  /**
+   * @throws QuotaExceededException if the namespace or diskspace quota is
+   *         violated after applying the deltas.
    */
   void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
     verifyNamespaceQuota(nsDelta);
-
-    if (Quota.isViolated(dsQuota, diskspace, dsDelta)) {
-      throw new DSQuotaExceededException(dsQuota, diskspace + dsDelta);
-    }
+    verifyDiskspaceQuota(dsDelta);
   }
 
-  String namespaceString() {
+  private String namespaceString() {
     return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
   }
-  String diskspaceString() {
+  private String diskspaceString() {
     return "diskspace: " + (dsQuota < 0? "-": diskspace + "/" + dsQuota);
   }
-  String quotaString() {
-    return ", Quota[" + namespaceString() + ", " + diskspaceString() + "]";
-  }
   
-  @VisibleForTesting
-  public long getNamespace() {
-    return this.namespace;
-  }
-  
-  @VisibleForTesting
-  public long getDiskspace() {
-    return this.diskspace;
+  @Override
+  public String toString() {
+    return "Quota[" + namespaceString() + ", " + diskspaceString() + "]";
   }
 }
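
The comment in addSpaceConsumed above ("check quotas in this inode and all ancestors before changing counts") captures the key invariant: verification happens before any cached count is mutated, so a rejected update leaves the namespace and diskspace counters unchanged. A minimal standalone sketch of that ordering, using simplified names and an unchecked exception in place of QuotaExceededException:

    class QuotaOrderingSketch {
      private long nsQuota = 10;
      private long namespace = 8;

      // (1) verify first: may throw, but modifies nothing.
      // (2) only then apply the delta to the cached count.
      void addSpaceConsumed(long nsDelta) {
        verifyNamespaceQuota(nsDelta);
        namespace += nsDelta;
      }

      void verifyNamespaceQuota(long nsDelta) {
        if (nsQuota >= 0 && namespace + nsDelta > nsQuota) {
          throw new IllegalStateException("namespace quota exceeded: "
              + (namespace + nsDelta) + " > " + nsQuota);
        }
      }

      public static void main(String[] args) {
        QuotaOrderingSketch dir = new QuotaOrderingSketch();
        dir.addSpaceConsumed(2);        // accepted: 10/10
        try {
          dir.addSpaceConsumed(1);      // rejected: would be 11/10
        } catch (IllegalStateException e) {
          // The cached count is still 10 because nothing was applied.
          System.out.println(e.getMessage() + "; namespace=" + dir.namespace);
        }
      }
    }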

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue Nov 26 18:33:22 2013
@@ -86,11 +86,15 @@ import com.google.common.base.Preconditi
  * 
  *************************************************/
 public class FSDirectory implements Closeable {
-  private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
-    final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
+  private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) {
+    final INodeDirectory r = new INodeDirectory(
         INodeId.ROOT_INODE_ID,
         INodeDirectory.ROOT_NAME,
-        namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
+        namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)),
+        0L);
+    r.addDirectoryWithQuotaFeature(
+        DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA,
+        DirectoryWithQuotaFeature.DEFAULT_DISKSPACE_QUOTA);
     final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
     s.setSnapshotQuota(0);
     return s;
@@ -106,7 +110,7 @@ public class FSDirectory implements Clos
   public final static String DOT_INODES_STRING = ".inodes";
   public final static byte[] DOT_INODES = 
       DFSUtil.string2Bytes(DOT_INODES_STRING);
-  INodeDirectoryWithQuota rootDir;
+  INodeDirectory rootDir;
   FSImage fsImage;  
   private final FSNamesystem namesystem;
   private volatile boolean ready = false;
@@ -201,7 +205,7 @@ public class FSDirectory implements Clos
   }
 
   /** @return the root directory inode. */
-  public INodeDirectoryWithQuota getRoot() {
+  public INodeDirectory getRoot() {
     return rootDir;
   }
 
@@ -1799,9 +1803,8 @@ public class FSDirectory implements Clos
     final INode[] inodes = inodesInPath.getINodes();
     for(int i=0; i < numOfINodes; i++) {
       if (inodes[i].isQuotaSet()) { // a directory with quota
-        INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i]
-            .asDirectory(); 
-        node.addSpaceConsumed2Cache(nsDelta, dsDelta);
+        inodes[i].asDirectory().getDirectoryWithQuotaFeature()
+            .addSpaceConsumed2Cache(nsDelta, dsDelta);
       }
     }
   }
@@ -2034,10 +2037,11 @@ public class FSDirectory implements Clos
         // Stop checking for quota when common ancestor is reached
         return;
       }
-      if (inodes[i].isQuotaSet()) { // a directory with quota
+      final DirectoryWithQuotaFeature q
+          = inodes[i].asDirectory().getDirectoryWithQuotaFeature();
+      if (q != null) { // a directory with quota
         try {
-          ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota(
-              nsDelta, dsDelta);
+          q.verifyQuota(nsDelta, dsDelta);
         } catch (QuotaExceededException e) {
           e.setPathName(getFullPathName(inodes, i));
           throw e;
@@ -2384,35 +2388,14 @@ public class FSDirectory implements Clos
       if (dsQuota == HdfsConstants.QUOTA_DONT_SET) {
         dsQuota = oldDsQuota;
       }        
+      if (oldNsQuota == nsQuota && oldDsQuota == dsQuota) {
+        return null;
+      }
 
       final Snapshot latest = iip.getLatestSnapshot();
-      if (dirNode instanceof INodeDirectoryWithQuota) {
-        INodeDirectoryWithQuota quotaNode = (INodeDirectoryWithQuota) dirNode;
-        Quota.Counts counts = null;
-        if (!quotaNode.isQuotaSet()) {
-          // dirNode must be an INodeDirectoryWithSnapshot whose quota has not
-          // been set yet
-          counts = quotaNode.computeQuotaUsage();
-        }
-        // a directory with quota; so set the quota to the new value
-        quotaNode.setQuota(nsQuota, dsQuota);
-        if (quotaNode.isQuotaSet() && counts != null) {
-          quotaNode.setSpaceConsumed(counts.get(Quota.NAMESPACE),
-              counts.get(Quota.DISKSPACE));
-        } else if (!quotaNode.isQuotaSet() && latest == null) {
-          // do not replace the node if the node is a snapshottable directory
-          // without snapshots
-          if (!(quotaNode instanceof INodeDirectoryWithSnapshot)) {
-            // will not come here for root because root is snapshottable and
-            // root's nsQuota is always set
-            return quotaNode.replaceSelf4INodeDirectory(inodeMap);
-          }
-        }
-      } else {
-        // a non-quota directory; so replace it with a directory with quota
-        return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota, inodeMap);
-      }
-      return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null;
+      dirNode = dirNode.recordModification(latest, inodeMap);
+      dirNode.setQuota(nsQuota, dsQuota);
+      return dirNode;
     }
   }
   
@@ -2441,7 +2424,8 @@ public class FSDirectory implements Clos
   long totalInodes() {
     readLock();
     try {
-      return rootDir.numItemsInTree();
+      return rootDir.getDirectoryWithQuotaFeature().getSpaceConsumed()
+          .get(Quota.NAMESPACE);
     } finally {
       readUnlock();
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue Nov 26 18:33:22 2013
@@ -755,7 +755,7 @@ public class FSImage implements Closeabl
    * This is an update of existing state of the filesystem and does not
    * throw QuotaExceededException.
    */
-  static void updateCountForQuota(INodeDirectoryWithQuota root) {
+  static void updateCountForQuota(INodeDirectory root) {
     updateCountForQuotaRecursively(root, Quota.Counts.newInstance());
   }
   
@@ -795,7 +795,7 @@ public class FSImage implements Closeabl
             + " quota = " + dsQuota + " < consumed = " + diskspace);
       }
 
-      ((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace);
+      dir.getDirectoryWithQuotaFeature().setSpaceConsumed(namespace, diskspace);
     }
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Tue Nov 26 18:33:22 2013
@@ -375,7 +375,7 @@ public class FSImageFormat {
     final long dsQuota = q.get(Quota.DISKSPACE);
     FSDirectory fsDir = namesystem.dir;
     if (nsQuota != -1 || dsQuota != -1) {
-      fsDir.rootDir.setQuota(nsQuota, dsQuota);
+      fsDir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
     }
     fsDir.rootDir.cloneModificationTime(root);
     fsDir.rootDir.clonePermissionStatus(root);    
@@ -729,10 +729,11 @@ public class FSImageFormat {
       if (counter != null) {
         counter.increment();
       }
-      final INodeDirectory dir = nsQuota >= 0 || dsQuota >= 0?
-          new INodeDirectoryWithQuota(inodeId, localName, permissions,
-              modificationTime, nsQuota, dsQuota)
-          : new INodeDirectory(inodeId, localName, permissions, modificationTime);
+      final INodeDirectory dir = new INodeDirectory(inodeId, localName,
+          permissions, modificationTime);
+      if (nsQuota >= 0 || dsQuota >= 0) {
+        dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
+      }
       return snapshottable ? new INodeDirectorySnapshottable(dir)
           : withSnapshot ? new INodeDirectoryWithSnapshot(dir)
           : dir;
@@ -972,13 +973,14 @@ public class FSImageFormat {
       checkNotSaved();
 
       final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
-      FSDirectory fsDir = sourceNamesystem.dir;
+      final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
+      final long numINodes = rootDir.getDirectoryWithQuotaFeature()
+          .getSpaceConsumed().get(Quota.NAMESPACE);
       String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
       Step step = new Step(StepType.INODES, sdPath);
       StartupProgress prog = NameNode.getStartupProgress();
       prog.beginStep(Phase.SAVING_CHECKPOINT, step);
-      prog.setTotal(Phase.SAVING_CHECKPOINT, step,
-        fsDir.rootDir.numItemsInTree());
+      prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
       Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
       long startTime = now();
       //
@@ -997,7 +999,7 @@ public class FSImageFormat {
         // fairness-related deadlock. See the comments on HDFS-2223.
         out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
             .getNamespaceID());
-        out.writeLong(fsDir.rootDir.numItemsInTree());
+        out.writeLong(numINodes);
         out.writeLong(sourceNamesystem.getGenerationStampV1());
         out.writeLong(sourceNamesystem.getGenerationStampV2());
         out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
@@ -1014,14 +1016,13 @@ public class FSImageFormat {
                  " using " + compression);
 
         // save the root
-        saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
+        saveINode2Image(rootDir, out, false, referenceMap, counter);
         // save the rest of the nodes
-        saveImage(fsDir.rootDir, out, true, false, counter);
+        saveImage(rootDir, out, true, false, counter);
         prog.endStep(Phase.SAVING_CHECKPOINT, step);
         // Now that the step is finished, set counter equal to total to adjust
         // for possible under-counting due to reference inodes.
-        prog.setCount(Phase.SAVING_CHECKPOINT, step,
-          fsDir.rootDir.numItemsInTree());
+        prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
         // save files under construction
         // TODO: for HDFS-5428, since we cannot break the compatibility of 
         // fsimage, we store part of the under-construction files that are only

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Tue Nov 26 18:33:22 2013
@@ -406,6 +406,15 @@ public abstract class INode implements I
    */
   public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) 
       throws QuotaExceededException {
+    addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
+  }
+
+  /**
+   * Check and add namespace/diskspace consumed to itself and the ancestors.
+   * @throws QuotaExceededException if the quota is violated.
+   */
+  void addSpaceConsumed2Parent(long nsDelta, long dsDelta, boolean verify) 
+      throws QuotaExceededException {
     if (parent != null) {
       parent.addSpaceConsumed(nsDelta, dsDelta, verify);
     }
@@ -744,4 +753,51 @@ public abstract class INode implements I
       toDeleteList.clear();
     }
   }
+
+  /** INode feature such as {@link FileUnderConstructionFeature}
+   *  and {@link DirectoryWithQuotaFeature}.
+   */
+  interface Feature<F extends Feature<F>> {
+    /** @return the next feature. */
+    public F getNextFeature();
+
+    /** Set the next feature. */
+    public void setNextFeature(F next);
+
+    /** Utility methods such as addFeature and removeFeature. */
+    static class Util {
+      /**
+       * Add a feature to the linked list.
+       * @return the new head.
+       */
+      static <F extends Feature<F>> F addFeature(F feature, F head) {
+        feature.setNextFeature(head);
+        return feature;
+      }
+
+      /**
+       * Remove a feature from the linked list.
+       * @return the new head.
+       */
+      static <F extends Feature<F>> F removeFeature(F feature, F head) {
+        if (feature == head) {
+          final F newHead = head.getNextFeature();
+          head.setNextFeature(null);
+          return newHead;
+        } else if (head != null) {
+          F prev = head;
+          F curr = head.getNextFeature();
+          for (; curr != null && curr != feature;
+              prev = curr, curr = curr.getNextFeature())
+            ;
+          if (curr != null) {
+            prev.setNextFeature(curr.getNextFeature());
+            curr.setNextFeature(null);
+            return head;
+          }
+        }
+        throw new IllegalStateException("Feature " + feature + " not found.");
+      }
+    }
+  }
 }
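
The new INode.Feature.Util above centralizes the singly linked list handling that INodeFile previously had inline (see the INodeFile.java hunk below): addFeature pushes the new feature onto the head, and removeFeature unlinks it or throws IllegalStateException if it is not present. A self-contained sketch of those list semantics with a plain node type, not the HDFS Feature interface:

    class FeatureNode {
      final String name;
      FeatureNode next;

      FeatureNode(String name) {
        this.name = name;
      }

      static FeatureNode add(FeatureNode feature, FeatureNode head) {
        feature.next = head;            // push onto the head of the list
        return feature;                 // the added feature is the new head
      }

      static FeatureNode remove(FeatureNode feature, FeatureNode head) {
        if (feature == head) {
          FeatureNode newHead = head.next;
          head.next = null;
          return newHead;
        }
        for (FeatureNode prev = head; prev != null; prev = prev.next) {
          if (prev.next == feature) {
            prev.next = feature.next;   // unlink from the middle or end
            feature.next = null;
            return head;                // head is unchanged
          }
        }
        throw new IllegalStateException("Feature " + feature.name + " not found.");
      }

      public static void main(String[] args) {
        FeatureNode head = add(new FeatureNode("quota"), null);
        head = add(new FeatureNode("underConstruction"), head);
        head = remove(head.next, head); // drop "quota" from the tail
        System.out.println(head.name + " -> " + head.next);
      }
    }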

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Tue Nov 26 18:33:22 2013
@@ -46,6 +46,21 @@ import com.google.common.base.Preconditi
  */
 public class INodeDirectory extends INodeWithAdditionalFields
     implements INodeDirectoryAttributes {
+  /** Directory related features such as quota and snapshots. */
+  public static abstract class Feature implements INode.Feature<Feature> {
+    private Feature nextFeature;
+
+    @Override
+    public Feature getNextFeature() {
+      return nextFeature;
+    }
+
+    @Override
+    public void setNextFeature(Feature next) {
+      this.nextFeature = next;
+    }
+  }
+
   /** Cast INode to INodeDirectory. */
   public static INodeDirectory valueOf(INode inode, Object path
       ) throws FileNotFoundException, PathIsNotDirectoryException {
@@ -63,6 +78,9 @@ public class INodeDirectory extends INod
   final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
 
   private List<INode> children = null;
+  
+  /** A linked list of {@link Feature}s. */
+  private Feature headFeature = null;
 
   /** constructor */
   public INodeDirectory(long id, byte[] name, PermissionStatus permissions,
@@ -76,7 +94,7 @@ public class INodeDirectory extends INod
    * @param adopt Indicate whether or not need to set the parent field of child
    *              INodes to the new node
    */
-  public INodeDirectory(INodeDirectory other, boolean adopt) {
+  public INodeDirectory(INodeDirectory other, boolean adopt, boolean copyFeatures) {
     super(other);
     this.children = other.children;
     if (adopt && this.children != null) {
@@ -84,6 +102,9 @@ public class INodeDirectory extends INod
         child.setParent(this);
       }
     }
+    if (copyFeatures) {
+      this.headFeature = other.headFeature;
+    }
   }
 
   /** @return true unconditionally. */
@@ -103,6 +124,73 @@ public class INodeDirectory extends INod
     return false;
   }
 
+  void setQuota(long nsQuota, long dsQuota) {
+    DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
+    if (quota != null) {
+      // already has quota; so set the quota to the new values
+      quota.setQuota(nsQuota, dsQuota);
+      if (!isQuotaSet() && !isRoot()) {
+        removeFeature(quota);
+      }
+    } else {
+      final Quota.Counts c = computeQuotaUsage();
+      quota = addDirectoryWithQuotaFeature(nsQuota, dsQuota);
+      quota.setSpaceConsumed(c.get(Quota.NAMESPACE), c.get(Quota.DISKSPACE));
+    }
+  }
+
+  @Override
+  public Quota.Counts getQuotaCounts() {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    return q != null? q.getQuota(): super.getQuotaCounts();
+  }
+
+  @Override
+  public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) 
+      throws QuotaExceededException {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      q.addSpaceConsumed(this, nsDelta, dsDelta, verify);
+    } else {
+      addSpaceConsumed2Parent(nsDelta, dsDelta, verify);
+    }
+  }
+
+  /**
+   * If the directory contains a {@link DirectoryWithQuotaFeature}, return it;
+   * otherwise, return null.
+   */
+  public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() {
+    for(Feature f = headFeature; f != null; f = f.nextFeature) {
+      if (f instanceof DirectoryWithQuotaFeature) {
+        return (DirectoryWithQuotaFeature)f;
+      }
+    }
+    return null;
+  }
+
+  /** Is this directory with quota? */
+  final boolean isWithQuota() {
+    return getDirectoryWithQuotaFeature() != null;
+  }
+
+  DirectoryWithQuotaFeature addDirectoryWithQuotaFeature(
+      long nsQuota, long dsQuota) {
+    Preconditions.checkState(!isWithQuota(), "Directory is already with quota");
+    final DirectoryWithQuotaFeature quota = new DirectoryWithQuotaFeature(
+        nsQuota, dsQuota);
+    addFeature(quota);
+    return quota;
+  }
+
+  private void addFeature(Feature f) {
+    headFeature = INode.Feature.Util.addFeature(f, headFeature);
+  }
+
+  private void removeFeature(Feature f) {
+    headFeature = INode.Feature.Util.removeFeature(f, headFeature);
+  }
+
   private int searchChildren(byte[] name) {
     return children == null? -1: Collections.binarySearch(children, name);
   }
@@ -142,27 +230,6 @@ public class INodeDirectory extends INod
     return true;
   }
 
-  /**
-   * Replace itself with {@link INodeDirectoryWithQuota} or
-   * {@link INodeDirectoryWithSnapshot} depending on the latest snapshot.
-   */
-  INodeDirectoryWithQuota replaceSelf4Quota(final Snapshot latest,
-      final long nsQuota, final long dsQuota, final INodeMap inodeMap)
-      throws QuotaExceededException {
-    Preconditions.checkState(!(this instanceof INodeDirectoryWithQuota),
-        "this is already an INodeDirectoryWithQuota, this=%s", this);
-
-    if (!this.isInLatestSnapshot(latest)) {
-      final INodeDirectoryWithQuota q = new INodeDirectoryWithQuota(
-          this, true, nsQuota, dsQuota);
-      replaceSelf(q, inodeMap);
-      return q;
-    } else {
-      final INodeDirectoryWithSnapshot s = new INodeDirectoryWithSnapshot(this);
-      s.setQuota(nsQuota, dsQuota);
-      return replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
-    }
-  }
   /** Replace itself with an {@link INodeDirectorySnapshottable}. */
   public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
       Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
@@ -183,7 +250,7 @@ public class INodeDirectory extends INod
   public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
     Preconditions.checkState(getClass() != INodeDirectory.class,
         "the class is already INodeDirectory, this=%s", this);
-    return replaceSelf(new INodeDirectory(this, true), inodeMap);
+    return replaceSelf(new INodeDirectory(this, true, true), inodeMap);
   }
 
   /** Replace itself with the given directory. */
@@ -439,6 +506,21 @@ public class INodeDirectory extends INod
   @Override
   public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
       int lastSnapshotId) {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      if (useCache && isQuotaSet()) {
+        q.addNamespaceDiskspace(counts);
+      } else {
+        computeDirectoryQuotaUsage(counts, false, lastSnapshotId);
+      }
+      return counts;
+    } else {
+      return computeDirectoryQuotaUsage(counts, useCache, lastSnapshotId);
+    }
+  }
+
+  Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts, boolean useCache,
+      int lastSnapshotId) {
     if (children != null) {
       for (INode child : children) {
         child.computeQuotaUsage(counts, useCache, lastSnapshotId);
@@ -456,6 +538,16 @@ public class INodeDirectory extends INod
   @Override
   public ContentSummaryComputationContext computeContentSummary(
       ContentSummaryComputationContext summary) {
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      return q.computeContentSummary(this, summary);
+    } else {
+      return computeDirectoryContentSummary(summary);
+    }
+  }
+
+  ContentSummaryComputationContext computeDirectoryContentSummary(
+      ContentSummaryComputationContext summary) {
     ReadOnlyList<INode> childrenList = getChildrenList(null);
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.
@@ -570,7 +662,7 @@ public class INodeDirectory extends INod
       Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
           collectedBlocks, removedINodes, null, countDiffChange);
       if (isQuotaSet()) {
-        ((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache(
+        getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
             -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
       }
       return counts;
@@ -606,8 +698,9 @@ public class INodeDirectory extends INod
       final Snapshot snapshot) {
     super.dumpTreeRecursively(out, prefix, snapshot);
     out.print(", childrenSize=" + getChildrenList(snapshot).size());
-    if (this instanceof INodeDirectoryWithQuota) {
-      out.print(((INodeDirectoryWithQuota)this).quotaString());
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (q != null) {
+      out.print(", " + q);
     }
     if (this instanceof Snapshot.Root) {
       out.print(", snapshotId=" + snapshot.getId());

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Tue Nov 26 18:33:22 2013
@@ -50,13 +50,15 @@ public class INodeFile extends INodeWith
    * A feature contains specific information for a type of INodeFile. E.g.,
    * we can have separate features for Under-Construction and Snapshot.
    */
-  public static abstract class Feature {
+  public static abstract class Feature implements INode.Feature<Feature> {
     private Feature nextFeature;
 
+    @Override
     public Feature getNextFeature() {
       return nextFeature;
     }
 
+    @Override
     public void setNextFeature(Feature next) {
       this.nextFeature = next;
     }
@@ -160,26 +162,12 @@ public class INodeFile extends INodeWith
     return getFileUnderConstructionFeature() != null;
   }
 
-  void addFeature(Feature f) {
-    f.nextFeature = headFeature;
-    headFeature = f;
+  private void addFeature(Feature f) {
+    headFeature = INode.Feature.Util.addFeature(f, headFeature);
   }
 
-  void removeFeature(Feature f) {
-    if (f == headFeature) {
-      headFeature = headFeature.nextFeature;
-      return;
-    } else if (headFeature != null) {
-      Feature prev = headFeature;
-      Feature curr = headFeature.nextFeature;
-      for (; curr != null && curr != f; prev = curr, curr = curr.nextFeature)
-        ;
-      if (curr != null) {
-        prev.nextFeature = curr.nextFeature;
-        return;
-      }
-    }
-    throw new IllegalStateException("Feature " + f + " not found.");
+  private void removeFeature(Feature f) {
+    headFeature = INode.Feature.Util.removeFeature(f, headFeature);
   }
 
   /** @return true unconditionally. */
@@ -197,7 +185,7 @@ public class INodeFile extends INodeWith
   /* Start of Under-Construction Feature */
 
   /** Convert this file to an {@link INodeFileUnderConstruction}. */
-  public INodeFile toUnderConstruction(String clientName, String clientMachine,
+  INodeFile toUnderConstruction(String clientName, String clientMachine,
       DatanodeDescriptor clientNode) {
     Preconditions.checkState(!isUnderConstruction(),
         "file is already an INodeFileUnderConstruction");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Tue Nov 26 18:33:22 2013
@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -55,7 +54,7 @@ import com.google.common.base.Preconditi
  * storing snapshot data. When there are modifications to the directory, the old
  * data is stored in the latest snapshot, if there is any.
  */
-public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
+public class INodeDirectoryWithSnapshot extends INodeDirectory {
   /**
    * The difference between the current state and a previous snapshot
    * of the children list of an INodeDirectory.
@@ -486,7 +485,7 @@ public class INodeDirectoryWithSnapshot 
 
   INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
       DirectoryDiffList diffs) {
-    super(that, adopt, that.getQuotaCounts());
+    super(that, adopt, true);
     this.diffs = diffs != null? diffs: new DirectoryDiffList();
   }
 
@@ -771,8 +770,8 @@ public class INodeDirectoryWithSnapshot 
         removedINodes, priorDeleted, countDiffChange));
     
     if (isQuotaSet()) {
-      this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE),
-          -counts.get(Quota.DISKSPACE));
+      getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
+          -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
     }
     return counts;
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Tue Nov 26 18:33:22 2013
@@ -136,7 +136,7 @@ public class Snapshot implements Compara
   /** The root directory of the snapshot. */
   static public class Root extends INodeDirectory {
     Root(INodeDirectory other) {
-      super(other, false);
+      super(other, false, false);
     }
 
     @Override

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Tue Nov 26 18:33:22 2013
@@ -368,10 +368,7 @@ public class TestQuota {
     // be identical.
     conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    final FileSystem fs = cluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-                fs instanceof DistributedFileSystem);
-    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+    final DistributedFileSystem dfs = cluster.getFileSystem();
     
     try {
       // 1: create directory /nqdir0/qdir1/qdir20/nqdir30

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java Tue Nov 26 18:33:22 2013
@@ -158,7 +158,7 @@ public class TestFSImageWithSnapshot {
     try {
       loader.load(imageFile);
       FSImage.updateCountForQuota(
-          (INodeDirectoryWithQuota)fsn.getFSDirectory().getINode("/"));
+          INodeDirectory.valueOf(fsn.getFSDirectory().getINode("/"), "/"));
     } finally {
       fsn.getFSDirectory().writeUnlock();
       fsn.writeUnlock();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Tue Nov 26 18:33:22 2013
@@ -49,7 +49,7 @@ public class TestFsLimits {
   static PermissionStatus perms
     = new PermissionStatus("admin", "admin", FsPermission.getDefault());
 
-  static INodeDirectoryWithQuota rootInode;
+  static INodeDirectory rootInode;
 
   static private FSNamesystem getMockNamesystem() {
     FSNamesystem fsn = mock(FSNamesystem.class);
@@ -75,8 +75,8 @@ public class TestFsLimits {
              fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                                 "namenode")).toString());
 
-    rootInode = new INodeDirectoryWithQuota(getMockNamesystem()
-        .allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms);
+    rootInode = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
+        INodeDirectory.ROOT_NAME, perms, 0L);
     inodes = new INode[]{ rootInode, null };
     fs = null;
     fsIsReady = true;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Tue Nov 26 18:33:22 2013
@@ -211,9 +211,9 @@ public class TestINodeFile {
       // Call FSDirectory#unprotectedSetQuota which calls
       // INodeDirectory#replaceChild
       dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
-      INode dirNode = fsdir.getINode(dir.toString());
+      INodeDirectory dirNode = getDir(fsdir, dir);
       assertEquals(dir.toString(), dirNode.getFullPathName());
-      assertTrue(dirNode instanceof INodeDirectoryWithQuota);
+      assertTrue(dirNode.isWithQuota());
       
       final Path newDir = new Path("/newdir");
       final Path newFile = new Path(newDir, "file");
@@ -871,6 +871,12 @@ public class TestINodeFile {
     }
   }
   
+  private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
+      throws IOException {
+    final String dirStr = dir.toString();
+    return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
+  }
+
   /**
    * Test whether the inode in inodeMap has been replaced after regular inode
    * replacement
@@ -887,21 +893,20 @@ public class TestINodeFile {
 
       final Path dir = new Path("/dir");
       hdfs.mkdirs(dir);
-      INode dirNode = fsdir.getINode(dir.toString());
+      INodeDirectory dirNode = getDir(fsdir, dir);
       INode dirNodeFromNode = fsdir.getInode(dirNode.getId());
       assertSame(dirNode, dirNodeFromNode);
 
       // set quota to dir, which leads to node replacement
       hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
-      dirNode = fsdir.getINode(dir.toString());
-      assertTrue(dirNode instanceof INodeDirectoryWithQuota);
+      dirNode = getDir(fsdir, dir);
+      assertTrue(dirNode.isWithQuota());
       // the inode in inodeMap should also be replaced
       dirNodeFromNode = fsdir.getInode(dirNode.getId());
       assertSame(dirNode, dirNodeFromNode);
 
       hdfs.setQuota(dir, -1, -1);
-      dirNode = fsdir.getINode(dir.toString());
-      assertTrue(dirNode instanceof INodeDirectory);
+      dirNode = getDir(fsdir, dir);
       // the inode in inodeMap should also be replaced
       dirNodeFromNode = fsdir.getInode(dirNode.getId());
       assertSame(dirNode, dirNodeFromNode);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Tue Nov 26 18:33:22 2013
@@ -1190,13 +1190,15 @@ public class TestRenameWithSnapshots {
     assertFalse(hdfs.exists(bar_s2));
     restartClusterAndCheckImage(true);
     // make sure the whole referred subtree has been destroyed
-    assertEquals(4, fsdir.getRoot().getNamespace());
-    assertEquals(0, fsdir.getRoot().getDiskspace());
+    Quota.Counts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(4, q.get(Quota.NAMESPACE));
+    assertEquals(0, q.get(Quota.DISKSPACE));
     
     hdfs.deleteSnapshot(sdir1, "s1");
     restartClusterAndCheckImage(true);
-    assertEquals(3, fsdir.getRoot().getNamespace());
-    assertEquals(0, fsdir.getRoot().getDiskspace());
+    q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(3, q.get(Quota.NAMESPACE));
+    assertEquals(0, q.get(Quota.DISKSPACE));
   }
   
   /**
@@ -1938,10 +1940,12 @@ public class TestRenameWithSnapshots {
     // check
     final INodeDirectorySnapshottable dir1Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
-    assertEquals(4, dir1Node.getNamespace());
+    Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(4, q1.get(Quota.NAMESPACE));
     final INodeDirectorySnapshottable dir2Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
-    assertEquals(2, dir2Node.getNamespace());
+    Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(2, q2.get(Quota.NAMESPACE));
     
     final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
         foo.getName());
@@ -2005,10 +2009,12 @@ public class TestRenameWithSnapshots {
     final INodeDirectorySnapshottable dir1Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
     // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
-    assertEquals(9, dir1Node.getNamespace());
+    Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(9, q1.get(Quota.NAMESPACE));
     final INodeDirectorySnapshottable dir2Node = 
         (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
-    assertEquals(2, dir2Node.getNamespace());
+    Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
+    assertEquals(2, q2.get(Quota.NAMESPACE));
     
     final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
         foo.getName());

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Tue Nov 26 18:33:22 2013
@@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -157,15 +156,21 @@ public class TestSnapshotDeletion {
     hdfs.delete(dir, true);
   }
   
+  private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
+      throws IOException {
+    final String dirStr = dir.toString();
+    return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
+  }
+
   private void checkQuotaUsageComputation(final Path dirPath,
       final long expectedNs, final long expectedDs) throws IOException {
-    INode node = fsdir.getINode(dirPath.toString());
-    assertTrue(node.isDirectory() && node.isQuotaSet());
-    INodeDirectoryWithQuota dirNode = (INodeDirectoryWithQuota) node;
+    INodeDirectory dirNode = getDir(fsdir, dirPath);
+    assertTrue(dirNode.isQuotaSet());
+    Quota.Counts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
-        dirNode.getNamespace());
+        q.get(Quota.NAMESPACE));
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
-        dirNode.getDiskspace());
+        q.get(Quota.DISKSPACE));
     Quota.Counts counts = Quota.Counts.newInstance();
     dirNode.computeQuotaUsage(counts, false);
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java?rev=1545768&r1=1545767&r2=1545768&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java Tue Nov 26 18:33:22 2013
@@ -305,7 +305,7 @@ public class TestDiff {
     final int i = Diff.search(current, inode.getKey());
     Assert.assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);
-    final INodeDirectory newinode = new INodeDirectory(oldinode, false);
+    final INodeDirectory newinode = new INodeDirectory(oldinode, false, true);
     newinode.setModificationTime(oldinode.getModificationTime() + 1);
 
     current.set(i, newinode);