Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2015/02/14 06:08:00 UTC

[3/3] hadoop git commit: HDFS-7775. Use consistent naming for NN-internal quota related types and functions. (Contributed by Xiaoyu Yao)

HDFS-7775. Use consistent naming for NN-internal quota related types and functions. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6aa6cbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6aa6cbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6aa6cbf

Branch: refs/heads/branch-2
Commit: c6aa6cbfe5bda64c8b0c2f58be3b583f23ea762c
Parents: 751b834
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Feb 13 21:01:33 2015 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Feb 13 21:01:54 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   7 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  24 ++--
 .../hadoop/hdfs/DistributedFileSystem.java      |  10 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  18 +--
 .../hadoop/hdfs/protocol/ClientProtocol.java    |   8 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   2 +-
 .../ClientNamenodeProtocolTranslatorPB.java     |   4 +-
 .../namenode/DirectoryWithQuotaFeature.java     | 111 ++++++++++---------
 .../hdfs/server/namenode/FSDirAttrOp.java       |  36 +++---
 .../hdfs/server/namenode/FSDirConcatOp.java     |   2 +-
 .../hdfs/server/namenode/FSDirectory.java       |  39 +++----
 .../hadoop/hdfs/server/namenode/FSImage.java    |  16 +--
 .../hdfs/server/namenode/FSImageFormat.java     |   9 +-
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../server/namenode/FSImageSerialization.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   6 +-
 .../hadoop/hdfs/server/namenode/INode.java      |  18 +--
 .../hdfs/server/namenode/INodeDirectory.java    |  12 +-
 .../namenode/INodeDirectoryAttributes.java      |   8 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  46 ++++----
 .../hdfs/server/namenode/INodeReference.java    |   4 +-
 .../hdfs/server/namenode/INodeSymlink.java      |   2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   4 +-
 .../hadoop/hdfs/server/namenode/Quota.java      |  10 +-
 .../hdfs/server/namenode/QuotaCounts.java       |  98 ++++++++--------
 .../snapshot/FileWithSnapshotFeature.java       |  14 +--
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +-
 .../namenode/TestDiskspaceQuotaUpdate.java      |  14 +--
 .../server/namenode/TestQuotaByStorageType.java |   6 +-
 .../snapshot/TestRenameWithSnapshots.java       |  10 +-
 .../namenode/snapshot/TestSnapshotDeletion.java |   4 +-
 31 files changed, 285 insertions(+), 267 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 25ab567..a3d6b8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -654,6 +654,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7778. Rename FsVolumeListTest to TestFsVolumeList and commit it to
     branch-2. (Lei (Eddy) Xu via cnauroth)
 
+    HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
+    via aajisaka)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
@@ -668,8 +671,8 @@ Release 2.7.0 - UNRELEASED
       HDFS-7776. Adding additional unit tests for Quota By Storage Type.
       (Xiaoyu Yao via Arpit Agarwal)
 
-      HDFS-4625. BKJM doesn't take advantage of speculative reads. (Rakesh R
-      via aajisaka)
+      HDFS-7775. Use consistent naming for NN-internal quota related types
+      and functions. (Xiaoyu Yao via Arpit Agarwal)
 
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e56c48d..d27197f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3019,22 +3019,22 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * Sets or resets quotas for a directory.
    * @see ClientProtocol#setQuota(String, long, long, StorageType)
    */
-  void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
+  void setQuota(String src, long namespaceQuota, long storagespaceQuota)
       throws IOException {
     // sanity check
     if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
          namespaceQuota != HdfsConstants.QUOTA_RESET) ||
-        (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
-         diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
+        (storagespaceQuota <= 0 && storagespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
+         storagespaceQuota != HdfsConstants.QUOTA_RESET)) {
       throw new IllegalArgumentException("Invalid values for quota : " +
-                                         namespaceQuota + " and " + 
-                                         diskspaceQuota);
+                                         namespaceQuota + " and " +
+                                         storagespaceQuota);
                                          
     }
     TraceScope scope = getPathTraceScope("setQuota", src);
     try {
-      // Pass null as storage type for traditional space/namespace quota.
-      namenode.setQuota(src, namespaceQuota, diskspaceQuota, null);
+      // Pass null as storage type for traditional namespace/storagespace quota.
+      namenode.setQuota(src, namespaceQuota, storagespaceQuota, null);
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      FileNotFoundException.class,
@@ -3051,12 +3051,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * Sets or resets quotas by storage type for a directory.
    * @see ClientProtocol#setQuota(String, long, long, StorageType)
    */
-  void setQuotaByStorageType(String src, StorageType type, long spaceQuota)
+  void setQuotaByStorageType(String src, StorageType type, long quota)
       throws IOException {
-    if (spaceQuota <= 0 && spaceQuota != HdfsConstants.QUOTA_DONT_SET &&
-        spaceQuota != HdfsConstants.QUOTA_RESET) {
+    if (quota <= 0 && quota != HdfsConstants.QUOTA_DONT_SET &&
+        quota != HdfsConstants.QUOTA_RESET) {
       throw new IllegalArgumentException("Invalid values for quota :" +
-        spaceQuota);
+        quota);
     }
     if (type == null) {
       throw new IllegalArgumentException("Invalid storage type(null)");
@@ -3066,7 +3066,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
         + type.toString());
     }
     try {
-      namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota, type);
+      namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
         FileNotFoundException.class,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 323ff44..2cecdfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -692,13 +692,13 @@ public class DistributedFileSystem extends FileSystem {
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long, StorageType)
    */
   public void setQuota(Path src, final long namespaceQuota,
-      final long diskspaceQuota) throws IOException {
+      final long storagespaceQuota) throws IOException {
     Path absF = fixRelativePart(src);
     new FileSystemLinkResolver<Void>() {
       @Override
       public Void doCall(final Path p)
           throws IOException, UnresolvedLinkException {
-        dfs.setQuota(getPathName(p), namespaceQuota, diskspaceQuota);
+        dfs.setQuota(getPathName(p), namespaceQuota, storagespaceQuota);
         return null;
       }
       @Override
@@ -716,18 +716,18 @@ public class DistributedFileSystem extends FileSystem {
    *
    * @param src target directory whose quota is to be modified.
    * @param type storage type of the specific storage type quota to be modified.
-   * @param spaceQuota value of the specific storage type quota to be modified.
+   * @param quota value of the specific storage type quota to be modified.
    * Maybe {@link HdfsConstants#QUOTA_RESET} to clear quota by storage type.
    */
   public void setQuotaByStorageType(
-    Path src, final StorageType type, final long spaceQuota)
+    Path src, final StorageType type, final long quota)
     throws IOException {
     Path absF = fixRelativePart(src);
     new FileSystemLinkResolver<Void>() {
       @Override
       public Void doCall(final Path p)
         throws IOException, UnresolvedLinkException {
-        dfs.setQuotaByStorageType(getPathName(p), type, spaceQuota);
+        dfs.setQuotaByStorageType(getPathName(p), type, quota);
         return null;
       }
       @Override

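For reference, a minimal client-side sketch of the renamed public API as it appears in this diff. It assumes fs.defaultFS points at an HDFS namenode; the path, quota values and import locations (as of branch-2) are illustrative assumptions, not part of the change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class SetQuotaExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes the default file system is HDFS, so the cast is safe.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path dir = new Path("/quota-dir");   // hypothetical directory

        // The second long is now the storagespace quota: a limit in raw bytes
        // across all replicas, formerly called the diskspace quota.
        dfs.setQuota(dir, 1000L, 10L * 1024 * 1024 * 1024);

        // Clear only the storagespace quota; QUOTA_DONT_SET leaves the
        // namespace quota unchanged.
        dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
      }
    }
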
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index ca80ec4..4db19ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -96,8 +96,8 @@ public class HdfsAdmin {
   }
   
   /**
-   * Set the disk space quota (size of files) for a directory. Note that
-   * directories and sym links do not occupy disk space.
+   * Set the storage space quota (size of files) for a directory. Note that
+   * directories and sym links do not occupy storage space.
    * 
    * @param src the path to set the space quota of
    * @param spaceQuota the value to set for the space quota
@@ -108,8 +108,8 @@ public class HdfsAdmin {
   }
   
   /**
-   * Clear the disk space quota (size of files) for a directory. Note that
-   * directories and sym links do not occupy disk space.
+   * Clear the storage space quota (size of files) for a directory. Note that
+   * directories and sym links do not occupy storage space.
    * 
    * @param src the path to clear the space quota of
    * @throws IOException in the event of error
@@ -120,21 +120,21 @@ public class HdfsAdmin {
 
   /**
    * Set the quota by storage type for a directory. Note that
-   * directories and sym links do not occupy disk space.
+   * directories and sym links do not occupy storage type quota.
    *
    * @param src the target directory to set the quota by storage type
    * @param type the storage type to set for quota by storage type
-   * @param spaceQuota the value to set for quota by storage type
+   * @param quota the value to set for quota by storage type
    * @throws IOException in the event of error
    */
-  public void setQuotaByStorageType(Path src, StorageType type, long spaceQuota)
+  public void setQuotaByStorageType(Path src, StorageType type, long quota)
       throws IOException {
-    dfs.setQuotaByStorageType(src, type, spaceQuota);
+    dfs.setQuotaByStorageType(src, type, quota);
   }
 
   /**
    * Clear the space quota by storage type for a directory. Note that
-   * directories and sym links do not occupy disk space.
+   * directories and sym links do not occupy storage type quota.
    *
    * @param src the target directory to clear the quota by storage type
    * @param type the storage type to clear for quota by storage type

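As an illustration of the admin-facing entry point, a sketch of setQuotaByStorageType with the renamed quota parameter. The URI, path, storage type and value are placeholders, and the StorageType import location is an assumption (the class has moved between packages across branches).

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    // Assumption: adjust if your revision still has StorageType under org.apache.hadoop.hdfs.
    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class TypeQuotaExample {
      public static void main(String[] args) throws Exception {
        // Placeholder namenode URI.
        HdfsAdmin admin =
            new HdfsAdmin(new URI("hdfs://nn-host:8020"), new Configuration());
        // quota (formerly spaceQuota) caps the bytes stored on SSD media under
        // /hot-data, independently of the overall storagespace quota.
        admin.setQuotaByStorageType(new Path("/hot-data"), StorageType.SSD,
            1L * 1024 * 1024 * 1024);
      }
    }
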
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index f5fc937..799e7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -951,10 +951,14 @@ public interface ClientProtocol {
    * @param path  The string representation of the path to the directory
    * @param namespaceQuota Limit on the number of names in the tree rooted 
    *                       at the directory
-   * @param diskspaceQuota Limit on disk space occupied all the files under
+   * @param storagespaceQuota Limit on storage space occupied by all the files under
    *                       this directory.
    * @param type StorageType that the space quota is intended to be set on.
    *             It may be null when called by traditional space/namespace quota.
+   *             When type is not null, the storagespaceQuota parameter is for
+   *             the type specified and namespaceQuota must be
+   *             {@link HdfsConstants#QUOTA_DONT_SET}.
+   *
    * <br><br>
    *                       
    * The quota can have three types of values : (1) 0 or more will set 
@@ -971,7 +975,7 @@ public interface ClientProtocol {
    * @throws IOException If an I/O error occurred
    */
   @Idempotent
-  public void setQuota(String path, long namespaceQuota, long diskspaceQuota,
+  public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
       StorageType type) throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, SnapshotAccessControlException, IOException;
 

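The contract above distinguishes three kinds of values for each quota argument, plus the per-type case. A short commented sketch, continuing from the dfs and dir handles in the DistributedFileSystem example earlier in this message:

      // A positive value sets that quota.
      dfs.setQuota(dir, 100L, 1024L * 1024 * 1024);

      // QUOTA_DONT_SET leaves that particular quota unchanged.
      dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, 2L * 1024 * 1024 * 1024);

      // QUOTA_RESET removes that particular quota.
      dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);

      // For a per-type quota the client sends namespaceQuota = QUOTA_DONT_SET,
      // the byte limit as storagespaceQuota, and a non-null StorageType,
      // which is exactly what DFSClient#setQuotaByStorageType does in the diff above.
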
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 30a46df..ce8c392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -887,7 +887,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       SetQuotaRequestProto req) throws ServiceException {
     try {
       server.setQuota(req.getPath(), req.getNamespaceQuota(),
-          req.getDiskspaceQuota(),
+          req.getStoragespaceQuota(),
           req.hasStorageType() ?
           PBHelper.convertStorageType(req.getStorageType()): null);
       return VOID_SETQUOTA_RESPONSE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 9ab380b..771582d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -806,7 +806,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void setQuota(String path, long namespaceQuota, long diskspaceQuota,
+  public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
                        StorageType type)
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException {
@@ -814,7 +814,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         = SetQuotaRequestProto.newBuilder()
         .setPath(path)
         .setNamespaceQuota(namespaceQuota)
-        .setDiskspaceQuota(diskspaceQuota);
+        .setStoragespaceQuota(storagespaceQuota);
     if (type != null) {
       builder.setStorageType(PBHelper.convertStorageType(type));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index e7eeba6..73473c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.util.EnumCounters;
  */
 public final class DirectoryWithQuotaFeature implements INode.Feature {
   public static final long DEFAULT_NAMESPACE_QUOTA = Long.MAX_VALUE;
-  public static final long DEFAULT_SPACE_QUOTA = HdfsConstants.QUOTA_RESET;
+  public static final long DEFAULT_STORAGE_SPACE_QUOTA = HdfsConstants.QUOTA_RESET;
 
   private QuotaCounts quota;
   private QuotaCounts usage;
@@ -41,9 +41,10 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
     private QuotaCounts usage;
 
     public Builder() {
-      this.quota = new QuotaCounts.Builder().nameCount(DEFAULT_NAMESPACE_QUOTA).
-          spaceCount(DEFAULT_SPACE_QUOTA).typeCounts(DEFAULT_SPACE_QUOTA).build();
-      this.usage = new QuotaCounts.Builder().nameCount(1).build();
+      this.quota = new QuotaCounts.Builder().nameSpace(DEFAULT_NAMESPACE_QUOTA).
+          storageSpace(DEFAULT_STORAGE_SPACE_QUOTA).
+          typeSpaces(DEFAULT_STORAGE_SPACE_QUOTA).build();
+      this.usage = new QuotaCounts.Builder().nameSpace(1).build();
     }
 
     public Builder nameSpaceQuota(long nameSpaceQuota) {
@@ -51,8 +52,8 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
       return this;
     }
 
-    public Builder spaceQuota(long spaceQuota) {
-      this.quota.setDiskSpace(spaceQuota);
+    public Builder storageSpaceQuota(long spaceQuota) {
+      this.quota.setStorageSpace(spaceQuota);
       return this;
     }
 
@@ -84,33 +85,41 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
   /** Set this directory's quota
    * 
    * @param nsQuota Namespace quota to be set
-   * @param dsQuota Diskspace quota to be set
-   * @param type Storage type quota to be set
-   * * To set traditional space/namespace quota, type must be null
+   * @param ssQuota Storagespace quota to be set
+   * @param type Storage type of the storage space quota to be set.
+   *             To set storagespace/namespace quota, type must be null.
    */
-  void setQuota(long nsQuota, long dsQuota, StorageType type) {
+  void setQuota(long nsQuota, long ssQuota, StorageType type) {
     if (type != null) {
-      this.quota.setTypeSpace(type, dsQuota);
+      this.quota.setTypeSpace(type, ssQuota);
     } else {
-      setQuota(nsQuota, dsQuota);
+      setQuota(nsQuota, ssQuota);
     }
   }
 
-  void setQuota(long nsQuota, long dsQuota) {
+  void setQuota(long nsQuota, long ssQuota) {
     this.quota.setNameSpace(nsQuota);
-    this.quota.setDiskSpace(dsQuota);
+    this.quota.setStorageSpace(ssQuota);
   }
 
-  void setQuota(long dsQuota, StorageType type) {
-    this.quota.setTypeSpace(type, dsQuota);
+  void setQuota(long quota, StorageType type) {
+    this.quota.setTypeSpace(type, quota);
   }
 
-  // Set in a batch only during FSImage load
-  void setQuota(EnumCounters<StorageType> typeQuotas) {
-    this.quota.setTypeSpaces(typeQuotas);
+  /** Set storage type quota in a batch. (Only used by FSImage load)
+   *
+   * @param tsQuotas type space counts for all storage types supporting quota
+   */
+  void setQuota(EnumCounters<StorageType> tsQuotas) {
+    this.quota.setTypeSpaces(tsQuotas);
   }
 
-  QuotaCounts addNamespaceDiskspace(QuotaCounts counts) {
+  /**
+   * Add current quota usage to counts and return the updated counts
+   * @param counts counts to be added with current quota usage
+   * @return counts that have been added with the current quota usage
+   */
+  QuotaCounts AddCurrentSpaceUsage(QuotaCounts counts) {
     counts.add(this.usage);
     return counts;
   }
@@ -122,15 +131,15 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
     // Check only when the content has not changed in the middle.
     if (oldYieldCount == summary.getYieldCount()) {
-      checkDiskspace(dir, summary.getCounts().get(Content.DISKSPACE) - original);
+      checkStoragespace(dir, summary.getCounts().get(Content.DISKSPACE) - original);
     }
     return summary;
   }
 
-  private void checkDiskspace(final INodeDirectory dir, final long computed) {
-    if (-1 != quota.getDiskSpace() && usage.getDiskSpace() != computed) {
-      NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
-          + dir.getFullPathName() + ". Cached = " + usage.getDiskSpace()
+  private void checkStoragespace(final INodeDirectory dir, final long computed) {
+    if (-1 != quota.getStorageSpace() && usage.getStorageSpace() != computed) {
+      NameNode.LOG.error("BUG: Inconsistent storagespace for directory "
+          + dir.getFullPathName() + ". Cached = " + usage.getStorageSpace()
           + " != Computed = " + computed);
     }
   }
@@ -163,28 +172,28 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
   }
 
   /** 
-   * Sets namespace and diskspace take by the directory rooted 
+   * Sets namespace and storagespace taken by the directory rooted
    * at this INode. This should be used carefully. It does not check 
    * for quota violations.
    * 
    * @param namespace size of the directory to be set
-   * @param diskspace disk space take by all the nodes under this directory
-   * @param typeUsed counters of storage type usage
+   * @param storagespace storage space taken by all the nodes under this directory
+   * @param typespaces counters of storage type usage
    */
-  void setSpaceConsumed(long namespace, long diskspace,
-      EnumCounters<StorageType> typeUsed) {
+  void setSpaceConsumed(long namespace, long storagespace,
+      EnumCounters<StorageType> typespaces) {
     usage.setNameSpace(namespace);
-    usage.setDiskSpace(diskspace);
-    usage.setTypeSpaces(typeUsed);
+    usage.setStorageSpace(storagespace);
+    usage.setTypeSpaces(typespaces);
   }
 
   void setSpaceConsumed(QuotaCounts c) {
     usage.setNameSpace(c.getNameSpace());
-    usage.setDiskSpace(c.getDiskSpace());
+    usage.setStorageSpace(c.getStorageSpace());
     usage.setTypeSpaces(c.getTypeSpaces());
   }
 
-  /** @return the namespace and diskspace consumed. */
+  /** @return the namespace, storagespace and typespace consumed. */
   public QuotaCounts getSpaceConsumed() {
     return new QuotaCounts.Builder().quotaCount(usage).build();
   }
@@ -196,11 +205,11 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
           usage.getNameSpace() + delta);
     }
   }
-  /** Verify if the diskspace quota is violated after applying delta. */
-  private void verifyDiskspaceQuota(long delta) throws DSQuotaExceededException {
-    if (Quota.isViolated(quota.getDiskSpace(), usage.getDiskSpace(), delta)) {
-      throw new DSQuotaExceededException(quota.getDiskSpace(),
-          usage.getDiskSpace() + delta);
+  /** Verify if the storagespace quota is violated after applying delta. */
+  private void verifyStoragespaceQuota(long delta) throws DSQuotaExceededException {
+    if (Quota.isViolated(quota.getStorageSpace(), usage.getStorageSpace(), delta)) {
+      throw new DSQuotaExceededException(quota.getStorageSpace(),
+          usage.getStorageSpace() + delta);
     }
   }
 
@@ -222,22 +231,22 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
   }
 
   /**
-   * @throws QuotaExceededException if namespace, diskspace or storage type quotas
-   * is violated after applying the deltas.
+   * @throws QuotaExceededException if namespace, storagespace or storage type
+   * space quota is violated after applying the deltas.
    */
   void verifyQuota(QuotaCounts counts) throws QuotaExceededException {
     verifyNamespaceQuota(counts.getNameSpace());
-    verifyDiskspaceQuota(counts.getDiskSpace());
+    verifyStoragespaceQuota(counts.getStorageSpace());
     verifyQuotaByStorageType(counts.getTypeSpaces());
   }
 
   boolean isQuotaSet() {
-    return quota.anyNsSpCountGreaterOrEqual(0) ||
-        quota.anyTypeCountGreaterOrEqual(0);
+    return quota.anyNsSsCountGreaterOrEqual(0) ||
+        quota.anyTypeSpaceCountGreaterOrEqual(0);
   }
 
   boolean isQuotaByStorageTypeSet() {
-    return quota.anyTypeCountGreaterOrEqual(0);
+    return quota.anyTypeSpaceCountGreaterOrEqual(0);
   }
 
   boolean isQuotaByStorageTypeSet(StorageType t) {
@@ -248,12 +257,12 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
     return "namespace: " + (quota.getNameSpace() < 0? "-":
         usage.getNameSpace() + "/" + quota.getNameSpace());
   }
-  private String diskspaceString() {
-    return "diskspace: " + (quota.getDiskSpace() < 0? "-":
-        usage.getDiskSpace() + "/" + quota.getDiskSpace());
+  private String storagespaceString() {
+    return "storagespace: " + (quota.getStorageSpace() < 0? "-":
+        usage.getStorageSpace() + "/" + quota.getStorageSpace());
   }
 
-  private String quotaByStorageTypeString() {
+  private String typeSpaceString() {
     StringBuilder sb = new StringBuilder();
     for (StorageType t : StorageType.getTypesSupportingQuota()) {
       sb.append("StorageType: " + t +
@@ -265,7 +274,7 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
 
   @Override
   public String toString() {
-    return "Quota[" + namespaceString() + ", " + diskspaceString() +
-        ", " + quotaByStorageTypeString() + "]";
+    return "Quota[" + namespaceString() + ", " + storagespaceString() +
+        ", " + typeSpaceString() + "]";
   }
 }
\ No newline at end of file

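Inside the NameNode the same rename runs through QuotaCounts. A minimal sketch of the new builder and accessor names, using only calls that appear in this diff, as used within the org.apache.hadoop.hdfs.server.namenode package (NN-internal code, shown only to summarize the old-to-new mapping):

    // Old names: nameCount()/spaceCount()/typeCounts(), getDiskSpace(), setDiskSpace(), addDiskSpace()
    // New names: nameSpace()/storageSpace()/typeSpaces(), getStorageSpace(), setStorageSpace(), addStorageSpace()
    QuotaCounts counts = new QuotaCounts.Builder()
        .nameSpace(HdfsConstants.QUOTA_RESET)
        .storageSpace(HdfsConstants.QUOTA_RESET)
        .typeSpaces(HdfsConstants.QUOTA_RESET)
        .build();
    long ss = counts.getStorageSpace();   // formerly getDiskSpace()
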
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 5843b4c..1fb2688 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -219,12 +219,12 @@ public class FSDirAttrOp {
   }
 
   /**
-   * Set the namespace quota, diskspace and typeSpace quota for a directory.
+   * Set the namespace, storagespace and typespace quota for a directory.
    *
    * Note: This does not support ".inodes" relative path.
    */
-  static void setQuota(FSDirectory fsd, String src, long nsQuota, long dsQuota, StorageType type)
-      throws IOException {
+  static void setQuota(FSDirectory fsd, String src, long nsQuota, long ssQuota,
+      StorageType type) throws IOException {
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
       pc.checkSuperuserPrivilege();
@@ -232,11 +232,11 @@ public class FSDirAttrOp {
 
     fsd.writeLock();
     try {
-      INodeDirectory changed = unprotectedSetQuota(fsd, src, nsQuota, dsQuota, type);
+      INodeDirectory changed = unprotectedSetQuota(fsd, src, nsQuota, ssQuota, type);
       if (changed != null) {
         final QuotaCounts q = changed.getQuotaCounts();
         if (type == null) {
-          fsd.getEditLog().logSetQuota(src, q.getNameSpace(), q.getDiskSpace());
+          fsd.getEditLog().logSetQuota(src, q.getNameSpace(), q.getStorageSpace());
         } else {
           fsd.getEditLog().logSetQuotaByStorageType(
               src, q.getTypeSpaces().get(type), type);
@@ -314,7 +314,7 @@ public class FSDirAttrOp {
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
   static INodeDirectory unprotectedSetQuota(
-      FSDirectory fsd, String src, long nsQuota, long dsQuota, StorageType type)
+      FSDirectory fsd, String src, long nsQuota, long ssQuota, StorageType type)
       throws FileNotFoundException, PathIsNotDirectoryException,
       QuotaExceededException, UnresolvedLinkException,
       SnapshotAccessControlException, UnsupportedActionException {
@@ -322,11 +322,11 @@ public class FSDirAttrOp {
     // sanity check
     if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET &&
          nsQuota != HdfsConstants.QUOTA_RESET) ||
-        (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET &&
-          dsQuota != HdfsConstants.QUOTA_RESET)) {
+        (ssQuota < 0 && ssQuota != HdfsConstants.QUOTA_DONT_SET &&
+          ssQuota != HdfsConstants.QUOTA_RESET)) {
       throw new IllegalArgumentException("Illegal value for nsQuota or " +
-                                         "dsQuota : " + nsQuota + " and " +
-                                         dsQuota);
+                                         "ssQuota : " + nsQuota + " and " +
+                                         ssQuota);
     }
     // sanity check for quota by storage type
     if ((type != null) && (!fsd.isQuotaByStorageTypeEnabled() ||
@@ -346,31 +346,31 @@ public class FSDirAttrOp {
     } else { // a directory inode
       final QuotaCounts oldQuota = dirNode.getQuotaCounts();
       final long oldNsQuota = oldQuota.getNameSpace();
-      final long oldDsQuota = oldQuota.getDiskSpace();
+      final long oldSsQuota = oldQuota.getStorageSpace();
 
       if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
         nsQuota = oldNsQuota;
       }
-      if (dsQuota == HdfsConstants.QUOTA_DONT_SET) {
-        dsQuota = oldDsQuota;
+      if (ssQuota == HdfsConstants.QUOTA_DONT_SET) {
+        ssQuota = oldSsQuota;
       }
 
       // unchanged space/namespace quota
-      if (type == null && oldNsQuota == nsQuota && oldDsQuota == dsQuota) {
+      if (type == null && oldNsQuota == nsQuota && oldSsQuota == ssQuota) {
         return null;
       }
 
       // unchanged type quota
       if (type != null) {
           EnumCounters<StorageType> oldTypeQuotas = oldQuota.getTypeSpaces();
-          if (oldTypeQuotas != null && oldTypeQuotas.get(type) == dsQuota) {
+          if (oldTypeQuotas != null && oldTypeQuotas.get(type) == ssQuota) {
               return null;
           }
       }
 
       final int latest = iip.getLatestSnapshotId();
       dirNode.recordModification(latest);
-      dirNode.setQuota(fsd.getBlockStoragePolicySuite(), nsQuota, dsQuota, type);
+      dirNode.setQuota(fsd.getBlockStoragePolicySuite(), nsQuota, ssQuota, type);
       return dirNode;
     }
   }
@@ -393,7 +393,7 @@ public class FSDirAttrOp {
     // if replication > oldBR, then newBR == replication.
     // if replication < oldBR, we don't know newBR yet.
     if (replication > oldBR) {
-      long dsDelta = file.diskspaceConsumed()/oldBR;
+      long dsDelta = file.storagespaceConsumed()/oldBR;
       fsd.updateCount(iip, 0L, dsDelta, oldBR, replication, true);
     }
 
@@ -402,7 +402,7 @@ public class FSDirAttrOp {
     final short newBR = file.getBlockReplication();
     // check newBR < oldBR case.
     if (newBR < oldBR) {
-      long dsDelta = file.diskspaceConsumed()/newBR;
+      long dsDelta = file.storagespaceConsumed()/newBR;
       fsd.updateCount(iip, 0L, dsDelta, oldBR, newBR, true);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index d8cf42a..7ba8c4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -150,7 +150,7 @@ class FSDirConcatOp {
       short srcRepl = src.getBlockReplication();
       long fileSize = src.computeFileSize();
       if (targetRepl != srcRepl) {
-        deltas.addDiskSpace(fileSize * (targetRepl - srcRepl));
+        deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));
         BlockStoragePolicy bsp =
             fsd.getBlockStoragePolicySuite().getPolicy(src.getStoragePolicyID());
         if (bsp != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 7f1437d..ba18a40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -108,7 +108,8 @@ public class FSDirectory implements Closeable {
     r.addDirectoryWithQuotaFeature(
         new DirectoryWithQuotaFeature.Builder().
             nameSpaceQuota(DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA).
-            spaceQuota(DirectoryWithQuotaFeature.DEFAULT_SPACE_QUOTA).build());
+            storageSpaceQuota(DirectoryWithQuotaFeature.DEFAULT_STORAGE_SPACE_QUOTA).
+            build());
     r.addSnapshottableFeature();
     r.setSnapshotQuota(0);
     return r;
@@ -600,18 +601,18 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  /** Updates namespace and diskspace consumed for all
+  /** Updates namespace, storagespace and typespaces consumed for all
    * directories until the parent directory of file represented by path.
    *
    * @param iip the INodesInPath instance containing all the INodes for
    *            updating quota usage
    * @param nsDelta the delta change of namespace
-   * @param dsDelta the delta change of space consumed without replication
+   * @param ssDelta the delta change of storage space consumed without replication
    * @param replication the replication factor of the block consumption change
    * @throws QuotaExceededException if the new count violates any quota limit
    * @throws FileNotFoundException if path does not exist.
    */
-  void updateSpaceConsumed(INodesInPath iip, long nsDelta, long dsDelta, short replication)
+  void updateSpaceConsumed(INodesInPath iip, long nsDelta, long ssDelta, short replication)
     throws QuotaExceededException, FileNotFoundException,
     UnresolvedLinkException, SnapshotAccessControlException {
     writeLock();
@@ -619,7 +620,7 @@ public class FSDirectory implements Closeable {
       if (iip.getLastINode() == null) {
         throw new FileNotFoundException("Path not found: " + iip.getPath());
       }
-      updateCount(iip, nsDelta, dsDelta, replication, true);
+      updateCount(iip, nsDelta, ssDelta, replication, true);
     } finally {
       writeUnlock();
     }
@@ -641,30 +642,30 @@ public class FSDirectory implements Closeable {
   /**
    * Update usage count without replication factor change
    */
-  void updateCount(INodesInPath iip, long nsDelta, long dsDelta, short replication,
+  void updateCount(INodesInPath iip, long nsDelta, long ssDelta, short replication,
       boolean checkQuota) throws QuotaExceededException {
     final INodeFile fileINode = iip.getLastINode().asFile();
     EnumCounters<StorageType> typeSpaceDeltas =
-      getStorageTypeDeltas(fileINode.getStoragePolicyID(), dsDelta,
+      getStorageTypeDeltas(fileINode.getStoragePolicyID(), ssDelta,
           replication, replication);;
     updateCount(iip, iip.length() - 1,
-      new QuotaCounts.Builder().nameCount(nsDelta).spaceCount(dsDelta * replication).
-          typeCounts(typeSpaceDeltas).build(),
+      new QuotaCounts.Builder().nameSpace(nsDelta).storageSpace(ssDelta * replication).
+          typeSpaces(typeSpaceDeltas).build(),
         checkQuota);
   }
 
   /**
    * Update usage count with replication factor change due to setReplication
    */
-  void updateCount(INodesInPath iip, long nsDelta, long dsDelta, short oldRep,
+  void updateCount(INodesInPath iip, long nsDelta, long ssDelta, short oldRep,
       short newRep, boolean checkQuota) throws QuotaExceededException {
     final INodeFile fileINode = iip.getLastINode().asFile();
     EnumCounters<StorageType> typeSpaceDeltas =
-        getStorageTypeDeltas(fileINode.getStoragePolicyID(), dsDelta, oldRep, newRep);
+        getStorageTypeDeltas(fileINode.getStoragePolicyID(), ssDelta, oldRep, newRep);
     updateCount(iip, iip.length() - 1,
-        new QuotaCounts.Builder().nameCount(nsDelta).
-            spaceCount(dsDelta * (newRep - oldRep)).
-            typeCounts(typeSpaceDeltas).build(),
+        new QuotaCounts.Builder().nameSpace(nsDelta).
+            storageSpace(ssDelta * (newRep - oldRep)).
+            typeSpaces(typeSpaceDeltas).build(),
         checkQuota);
   }
 
@@ -827,11 +828,11 @@ public class FSDirectory implements Closeable {
 
   /**
    * Verify quota for adding or moving a new INode with required 
-   * namespace and diskspace to a given position.
+   * namespace and storagespace to a given position.
    *  
    * @param iip INodes corresponding to a path
    * @param pos position where a new INode will be added
-   * @param deltas needed namespace, diskspace and storage types
+   * @param deltas needed namespace, storagespace and storage types
    * @param commonAncestor Last node in inodes array that is a common ancestor
    *          for a INode that is being moved from one location to the other.
    *          Pass null if a node is not being moved.
@@ -839,7 +840,7 @@ public class FSDirectory implements Closeable {
    */
   static void verifyQuota(INodesInPath iip, int pos, QuotaCounts deltas,
                           INode commonAncestor) throws QuotaExceededException {
-    if (deltas.getNameSpace() <= 0 && deltas.getDiskSpace() <= 0
+    if (deltas.getNameSpace() <= 0 && deltas.getStorageSpace() <= 0
         && deltas.getTypeSpaces().allLessOrEqual(0L)) {
       // if quota is being freed or not being consumed
       return;
@@ -1101,12 +1102,12 @@ public class FSDirectory implements Closeable {
     INodeFile file = iip.getLastINode().asFile();
     int latestSnapshot = iip.getLatestSnapshotId();
     file.recordModification(latestSnapshot, true);
-    long oldDiskspaceNoRep = file.diskspaceConsumedNoReplication();
+    long oldDiskspaceNoRep = file.storagespaceConsumedNoReplication();
     long remainingLength =
         file.collectBlocksBeyondMax(newLength, collectedBlocks);
     file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks);
     file.setModificationTime(mtime);
-    updateCount(iip, 0, file.diskspaceConsumedNoReplication() - oldDiskspaceNoRep,
+    updateCount(iip, 0, file.storagespaceConsumedNoReplication() - oldDiskspaceNoRep,
       file.getBlockReplication(), true);
     // return whether on a block boundary
     return (remainingLength - newLength) == 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 72b9f8c..321cdc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -866,7 +866,7 @@ public class FSImage implements Closeable {
   private static void updateCountForQuotaRecursively(BlockStoragePolicySuite bsps,
       INodeDirectory dir, QuotaCounts counts) {
     final long parentNamespace = counts.getNameSpace();
-    final long parentDiskspace = counts.getDiskSpace();
+    final long parentStoragespace = counts.getStorageSpace();
     final EnumCounters<StorageType> parentTypeSpaces = counts.getTypeSpaces();
 
     dir.computeQuotaUsage4CurrentDirectory(bsps, counts);
@@ -892,12 +892,12 @@ public class FSImage implements Closeable {
             + " quota = " + nsQuota + " < consumed = " + namespace);
       }
 
-      final long diskspace = counts.getDiskSpace() - parentDiskspace;
-      final long dsQuota = q.getDiskSpace();
-      if (Quota.isViolated(dsQuota, diskspace)) {
-        LOG.error("BUG: Diskspace quota violation in image for "
+      final long ssConsumed = counts.getStorageSpace() - parentStoragespace;
+      final long ssQuota = q.getStorageSpace();
+      if (Quota.isViolated(ssQuota, ssConsumed)) {
+        LOG.error("BUG: Storagespace quota violation in image for "
             + dir.getFullPathName()
-            + " quota = " + dsQuota + " < consumed = " + diskspace);
+            + " quota = " + ssQuota + " < consumed = " + ssConsumed);
       }
 
       final EnumCounters<StorageType> typeSpaces =
@@ -907,14 +907,14 @@ public class FSImage implements Closeable {
             parentTypeSpaces.get(t);
         final long typeQuota = q.getTypeSpaces().get(t);
         if (Quota.isViolated(typeQuota, typeSpace)) {
-          LOG.error("BUG Disk quota by storage type violation in image for "
+          LOG.error("BUG: Storage type quota violation in image for "
               + dir.getFullPathName()
               + " type = " + t.toString() + " quota = "
               + typeQuota + " < consumed " + typeSpace);
         }
       }
 
-      dir.getDirectoryWithQuotaFeature().setSpaceConsumed(namespace, diskspace,
+      dir.getDirectoryWithQuotaFeature().setSpaceConsumed(namespace, ssConsumed,
           typeSpaces);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index f677587..a95302a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -450,7 +450,7 @@ public class FSImageFormat {
   private void updateRootAttr(INodeWithAdditionalFields root) {                                                           
     final QuotaCounts q = root.getQuotaCounts();
     final long nsQuota = q.getNameSpace();
-    final long dsQuota = q.getDiskSpace();
+    final long dsQuota = q.getStorageSpace();
     FSDirectory fsDir = namesystem.dir;
     if (nsQuota != -1 || dsQuota != -1) {
       fsDir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
@@ -826,7 +826,7 @@ public class FSImageFormat {
           permissions, modificationTime);
       if (nsQuota >= 0 || dsQuota >= 0) {
         dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.Builder().
-            nameSpaceQuota(nsQuota).spaceQuota(dsQuota).build());
+            nameSpaceQuota(nsQuota).storageSpaceQuota(dsQuota).build());
       }
       if (withSnapshot) {
         dir.addSnapshotFeature(null);
@@ -910,7 +910,10 @@ public class FSImageFormat {
       final PermissionStatus permissions = PermissionStatus.read(in);
       final long modificationTime = in.readLong();
       
-      //read quotas
+      // Read quotas: quota by storage type does not need to be processed below.
+      // It is handled only in protobuf based FsImagePBINode class for newer
+      // fsImages. Tools using this class such as legacy-mode of offline image viewer
+      // should only load legacy FSImages without newer features.
       final long nsQuota = in.readLong();
       final long dsQuota = in.readLong();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 576c86f..759c8b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -169,7 +169,7 @@ public final class FSImageFormatPBINode {
       final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
       if (nsQuota >= 0 || dsQuota >= 0) {
         dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.Builder().
-            nameSpaceQuota(nsQuota).spaceQuota(dsQuota).build());
+            nameSpaceQuota(nsQuota).storageSpaceQuota(dsQuota).build());
       }
       EnumCounters<StorageType> typeQuotas = null;
       if (d.hasTypeQuotas()) {
@@ -374,7 +374,7 @@ public final class FSImageFormatPBINode {
       INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
       final QuotaCounts q = root.getQuotaCounts();
       final long nsQuota = q.getNameSpace();
-      final long dsQuota = q.getDiskSpace();
+      final long dsQuota = q.getStorageSpace();
       if (nsQuota != -1 || dsQuota != -1) {
         dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
       }
@@ -483,7 +483,7 @@ public final class FSImageFormatPBINode {
       INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
           .newBuilder().setModificationTime(dir.getModificationTime())
           .setNsQuota(quota.getNameSpace())
-          .setDsQuota(quota.getDiskSpace())
+          .setDsQuota(quota.getStorageSpace())
           .setPermission(buildPermissionStatus(dir, state.getStringMap()));
 
       if (quota.getTypeSpaces().anyGreaterOrEqual(0)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index e9f2958..fa9a65e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -224,7 +224,7 @@ public class FSImageSerialization {
   private static void writeQuota(QuotaCounts quota, DataOutput out)
       throws IOException {
     out.writeLong(quota.getNameSpace());
-    out.writeLong(quota.getDiskSpace());
+    out.writeLong(quota.getStorageSpace());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 213a53e..45072aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3802,20 +3802,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
-   * Set the namespace quota and diskspace quota for a directory.
+   * Set the namespace quota and storage space quota for a directory.
    * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the
    * contract.
    * 
    * Note: This does not support ".inodes" relative path.
    */
-  void setQuota(String src, long nsQuota, long dsQuota, StorageType type)
+  void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
       throws IOException {
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set quota on " + src);
-      FSDirAttrOp.setQuota(dir, src, nsQuota, dsQuota, type);
+      FSDirAttrOp.setQuota(dir, src, nsQuota, ssQuota, type);
     } finally {
       writeUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 7f3bf38..8c4e466 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -447,7 +447,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
     return new ContentSummary(counts.get(Content.LENGTH),
         counts.get(Content.FILE) + counts.get(Content.SYMLINK),
         counts.get(Content.DIRECTORY), q.getNameSpace(),
-        counts.get(Content.DISKSPACE), q.getDiskSpace());
+        counts.get(Content.DISKSPACE), q.getStorageSpace());
     // TODO: storage type quota reporting HDFS-7701.
   }
 
@@ -462,7 +462,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
 
 
   /**
-   * Check and add namespace/diskspace/storagetype consumed to itself and the ancestors.
+   * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors.
    * @throws QuotaExceededException if quote is violated.
    */
   public void addSpaceConsumed(QuotaCounts counts, boolean verify)
@@ -471,7 +471,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   }
 
   /**
-   * Check and add namespace/diskspace/storagetype consumed to itself and the ancestors.
+   * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors.
    * @throws QuotaExceededException if quote is violated.
    */
   void addSpaceConsumed2Parent(QuotaCounts counts, boolean verify)
@@ -487,26 +487,26 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    */
   public QuotaCounts getQuotaCounts() {
     return new QuotaCounts.Builder().
-        nameCount(HdfsConstants.QUOTA_RESET).
-        spaceCount(HdfsConstants.QUOTA_RESET).
-        typeCounts(HdfsConstants.QUOTA_RESET).
+        nameSpace(HdfsConstants.QUOTA_RESET).
+        storageSpace(HdfsConstants.QUOTA_RESET).
+        typeSpaces(HdfsConstants.QUOTA_RESET).
         build();
   }
 
   public final boolean isQuotaSet() {
     final QuotaCounts qc = getQuotaCounts();
-    return qc.anyNsSpCountGreaterOrEqual(0) || qc.anyTypeCountGreaterOrEqual(0);
+    return qc.anyNsSsCountGreaterOrEqual(0) || qc.anyTypeSpaceCountGreaterOrEqual(0);
   }
 
   /**
-   * Count subtree {@link Quota#NAMESPACE} and {@link Quota#DISKSPACE} usages.
+   * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages.
    */
   public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
     return computeQuotaUsage(bsps, new QuotaCounts.Builder().build(), true);
   }
 
   /**
-   * Count subtree {@link Quota#NAMESPACE} and {@link Quota#DISKSPACE} usages.
+   * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages.
    * 
    * With the existence of {@link INodeReference}, the same inode and its
    * subtree may be referred by multiple {@link WithName} nodes and a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index f9d160b..75a7349 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -137,14 +137,14 @@ public class INodeDirectory extends INodeWithAdditionalFields
         BlockStoragePolicySuite.ID_UNSPECIFIED;
   }
 
-  void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long dsQuota, StorageType type) {
+  void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {
     DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
     if (quota != null) {
       // already has quota; so set the quota to the new values
       if (type != null) {
-        quota.setQuota(dsQuota, type);
+        quota.setQuota(ssQuota, type);
       } else {
-        quota.setQuota(nsQuota, dsQuota);
+        quota.setQuota(nsQuota, ssQuota);
       }
       if (!isQuotaSet() && !isRoot()) {
         removeFeature(quota);
@@ -154,9 +154,9 @@ public class INodeDirectory extends INodeWithAdditionalFields
       DirectoryWithQuotaFeature.Builder builder =
           new DirectoryWithQuotaFeature.Builder().nameSpaceQuota(nsQuota);
       if (type != null) {
-        builder.typeQuota(type, dsQuota);
+        builder.typeQuota(type, ssQuota);
       } else {
-        builder.spaceQuota(dsQuota);
+        builder.storageSpaceQuota(ssQuota);
       }
       addDirectoryWithQuotaFeature(builder.build()).setSpaceConsumed(c);
     }
@@ -588,7 +588,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
     // compute the quota usage in the scope of the current directory tree
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
     if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
-      return q.addNamespaceDiskspace(counts);
+      return q.AddCurrentSpaceUsage(counts);
     } else {
       useCache = q != null && !q.isQuotaSet() ? false : useCache;
       return computeDirectoryQuotaUsage(bsps, counts, useCache, lastSnapshotId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
index 83649ec..44d5581 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
@@ -48,8 +48,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
 
     @Override
     public QuotaCounts getQuotaCounts() {
-      return new QuotaCounts.Builder().nameCount(-1).
-          spaceCount(-1).typeCounts(-1).build();
+      return new QuotaCounts.Builder().nameSpace(-1).
+          storageSpace(-1).typeSpaces(-1).build();
     }
 
     @Override
@@ -69,8 +69,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
         AclFeature aclFeature, long modificationTime, long nsQuota,
         long dsQuota, EnumCounters<StorageType> typeQuotas, XAttrFeature xAttrsFeature) {
       super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
-      this.quota = new QuotaCounts.Builder().nameCount(nsQuota).
-          spaceCount(dsQuota).typeCounts(typeQuotas).build();
+      this.quota = new QuotaCounts.Builder().nameSpace(nsQuota).
+          storageSpace(dsQuota).typeSpaces(typeQuotas).build();
     }
 
     public CopyWithQuota(INodeDirectory dir) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index b7db8d9..eaf72f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -412,8 +412,8 @@ public class INodeFile extends INodeWithAdditionalFields
     return header;
   }
 
-  /** @return the diskspace required for a full block. */
-  final long getPreferredBlockDiskspace() {
+  /** @return the storagespace required for a full block. */
+  final long getPreferredBlockStoragespace() {
     return getPreferredBlockSize() * getBlockReplication();
   }
 
@@ -553,8 +553,8 @@ public class INodeFile extends INodeWithAdditionalFields
       BlockStoragePolicySuite bsps, QuotaCounts counts, boolean useCache,
       int lastSnapshotId) {
     long nsDelta = 1;
-    final long dsDeltaNoReplication;
-    short dsReplication;
+    final long ssDeltaNoReplication;
+    short replication;
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
     if (sf != null) {
       FileDiffList fileDiffList = sf.getDiffs();
@@ -562,31 +562,31 @@ public class INodeFile extends INodeWithAdditionalFields
 
       if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
           || last == Snapshot.CURRENT_STATE_ID) {
-        dsDeltaNoReplication = diskspaceConsumedNoReplication();
-        dsReplication = getBlockReplication();
+        ssDeltaNoReplication = storagespaceConsumedNoReplication();
+        replication = getBlockReplication();
       } else if (last < lastSnapshotId) {
-        dsDeltaNoReplication = computeFileSize(true, false);
-        dsReplication = getFileReplication();
+        ssDeltaNoReplication = computeFileSize(true, false);
+        replication = getFileReplication();
       } else {
         int sid = fileDiffList.getSnapshotById(lastSnapshotId);
-        dsDeltaNoReplication = diskspaceConsumedNoReplication(sid);
-        dsReplication = getReplication(sid);
+        ssDeltaNoReplication = storagespaceConsumedNoReplication(sid);
+        replication = getReplication(sid);
       }
     } else {
-      dsDeltaNoReplication = diskspaceConsumedNoReplication();
-      dsReplication = getBlockReplication();
+      ssDeltaNoReplication = storagespaceConsumedNoReplication();
+      replication = getBlockReplication();
     }
     counts.addNameSpace(nsDelta);
-    counts.addDiskSpace(dsDeltaNoReplication * dsReplication);
+    counts.addStorageSpace(ssDeltaNoReplication * replication);
 
     if (getStoragePolicyID() != BlockStoragePolicySuite.ID_UNSPECIFIED){
       BlockStoragePolicy bsp = bsps.getPolicy(getStoragePolicyID());
-      List<StorageType> storageTypes = bsp.chooseStorageTypes(dsReplication);
+      List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
       for (StorageType t : storageTypes) {
         if (!t.supportTypeQuota()) {
           continue;
         }
-        counts.addTypeSpace(t, dsDeltaNoReplication);
+        counts.addTypeSpace(t, ssDeltaNoReplication);
       }
     }
     return counts;
@@ -610,7 +610,7 @@ public class INodeFile extends INodeWithAdditionalFields
         counts.add(Content.LENGTH, computeFileSize());
       }
     }
-    counts.add(Content.DISKSPACE, diskspaceConsumed());
+    counts.add(Content.DISKSPACE, storagespaceConsumed());
     return summary;
   }
 
@@ -681,11 +681,11 @@ public class INodeFile extends INodeWithAdditionalFields
    * including blocks in its snapshots.
    * Use preferred block size for the last block if it is under construction.
    */
-  public final long diskspaceConsumed() {
-    return diskspaceConsumedNoReplication() * getBlockReplication();
+  public final long storagespaceConsumed() {
+    return storagespaceConsumedNoReplication() * getBlockReplication();
   }
 
-  public final long diskspaceConsumedNoReplication() {
+  public final long storagespaceConsumedNoReplication() {
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
     if(sf == null) {
       return computeFileSize(true, true);
@@ -713,12 +713,12 @@ public class INodeFile extends INodeWithAdditionalFields
     return size;
   }
 
-  public final long diskspaceConsumed(int lastSnapshotId) {
+  public final long storagespaceConsumed(int lastSnapshotId) {
     if (lastSnapshotId != CURRENT_STATE_ID) {
       return computeFileSize(lastSnapshotId)
         * getFileReplication(lastSnapshotId);
     } else {
-      return diskspaceConsumed();
+      return storagespaceConsumed();
     }
   }
 
@@ -730,11 +730,11 @@ public class INodeFile extends INodeWithAdditionalFields
     }
   }
 
-  public final long diskspaceConsumedNoReplication(int lastSnapshotId) {
+  public final long storagespaceConsumedNoReplication(int lastSnapshotId) {
     if (lastSnapshotId != CURRENT_STATE_ID) {
       return computeFileSize(lastSnapshotId);
     } else {
-      return diskspaceConsumedNoReplication();
+      return storagespaceConsumedNoReplication();
     }
   }
 

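To make the renamed accounting concrete, here is a small sketch that mirrors the arithmetic in computeQuotaUsage() above: storage space is charged as the unreplicated size times the replication factor, while each chosen storage type is charged the unreplicated size once per replica. The block size, replication factor, and storage types are made-up values, not taken from the patch:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;

// Sketch only; not part of this patch.
public class StorageSpaceAccountingSketch {
  public static void main(String[] args) {
    long ssDeltaNoReplication = 128L * 1024 * 1024;  // one full 128 MB block
    short replication = 3;

    QuotaCounts counts = new QuotaCounts.Builder().build();
    counts.addNameSpace(1);                                      // the file inode
    counts.addStorageSpace(ssDeltaNoReplication * replication);  // 384 MB in total

    // Storage-type space is charged per replica, without the replication factor.
    List<StorageType> chosen =
        Arrays.asList(StorageType.SSD, StorageType.DISK, StorageType.DISK);
    for (StorageType t : chosen) {
      if (t.supportTypeQuota()) {
        counts.addTypeSpace(t, ssDeltaNoReplication);
      }
    }

    System.out.println(counts.getStorageSpace());                      // 402653184
    System.out.println(counts.getTypeSpaces().get(StorageType.DISK));  // 268435456
  }
}
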
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index f8c813c..911279a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -509,10 +509,10 @@ public abstract class INodeReference extends INode {
     @Override
     public final ContentSummaryComputationContext computeContentSummary(
         ContentSummaryComputationContext summary) {
-      //only count diskspace for WithName
+      //only count storagespace for WithName
       final QuotaCounts q = new QuotaCounts.Builder().build();
       computeQuotaUsage(summary.getBlockStoragePolicySuite(), q, false, lastSnapshotId);
-      summary.getCounts().add(Content.DISKSPACE, q.getDiskSpace());
+      summary.getCounts().add(Content.DISKSPACE, q.getStorageSpace());
       return summary;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index ef30ed7..fe75687 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -81,7 +81,7 @@ public class INodeSymlink extends INodeWithAdditionalFields {
         && priorSnapshotId == Snapshot.NO_SNAPSHOT_ID) {
       destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
     }
-    return new QuotaCounts.Builder().nameCount(1).build();
+    return new QuotaCounts.Builder().nameSpace(1).build();
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 1f2134a..f56d30e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1186,11 +1186,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
-  public void setQuota(String path, long namespaceQuota, long diskspaceQuota,
+  public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
                        StorageType type)
       throws IOException {
     checkNNStartup();
-    namesystem.setQuota(path, namespaceQuota, diskspaceQuota, type);
+    namesystem.setQuota(path, namespaceQuota, storagespaceQuota, type);
   }
   
   @Override // ClientProtocol

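From the client's point of view the change above is a parameter rename only; the RPC signature is unchanged. A hedged sketch of what drives this code path, assuming the usual DistributedFileSystem quota calls (setQuota and setQuotaByStorageType, with the StorageType class in its current org.apache.hadoop.hdfs package) and a made-up /data directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

// Sketch only; not part of this patch. Assumes fs.defaultFS points at HDFS.
public class SetQuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/data");

    // Namespace quota of 100000 objects; leave the storage space quota alone.
    dfs.setQuota(dir, 100000, HdfsConstants.QUOTA_DONT_SET);

    // Storage space quota of 1 TB (bytes, replication included).
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, 1024L * 1024 * 1024 * 1024);

    // Per-storage-type quota: 100 GB on SSD for this directory.
    dfs.setQuotaByStorageType(dir, StorageType.SSD, 100L * 1024 * 1024 * 1024);
  }
}
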
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
index 6121bcb..6d20e6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
@@ -23,16 +23,16 @@ import org.apache.hadoop.hdfs.util.EnumCounters;
 public enum Quota {
   /** The namespace usage, i.e. the number of name objects. */
   NAMESPACE,
-  /** The diskspace usage in bytes including replication. */
-  DISKSPACE;
+  /** The storage space usage in bytes including replication. */
+  STORAGESPACE;
 
   /** Counters for quota counts. */
   public static class Counts extends EnumCounters<Quota> {
-    /** @return a new counter with the given namespace and diskspace usages. */
-    public static Counts newInstance(long namespace, long diskspace) {
+    /** @return a new counter with the given namespace and storagespace usages. */
+    public static Counts newInstance(long namespace, long storagespace) {
       final Counts c = new Counts();
       c.set(NAMESPACE, namespace);
-      c.set(DISKSPACE, diskspace);
+      c.set(STORAGESPACE, storagespace);
       return c;
     }
 

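The counter itself is still used through the existing EnumCounters API; only the enum constant is renamed. A tiny sketch with made-up values:

import org.apache.hadoop.hdfs.server.namenode.Quota;

// Sketch only; not part of this patch.
public class QuotaEnumSketch {
  public static void main(String[] args) {
    // 1000 name objects and 10 GB of storage space (bytes, replication included).
    Quota.Counts counts = Quota.Counts.newInstance(1000, 10L * 1024 * 1024 * 1024);
    System.out.println(counts.get(Quota.NAMESPACE));     // 1000
    System.out.println(counts.get(Quota.STORAGESPACE));  // 10737418240
  }
}
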
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java
index 9b306b0..033a5ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java
@@ -22,47 +22,49 @@ import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 
 /**
- * Counters for namespace, space and storage type quota and usage.
+ * Counters for namespace, storage space and storage type space quota and usage.
  */
 public class QuotaCounts {
-
-  private EnumCounters<Quota> nsSpCounts;
-  private EnumCounters<StorageType> typeCounts;
+  // Name space and storage space counts (HDFS-7775 refactors the original disk
+  // space count to storage space counts)
+  private EnumCounters<Quota> nsSsCounts;
+  // Storage type space counts
+  private EnumCounters<StorageType> tsCounts;
 
   public static class Builder {
-    private EnumCounters<Quota> nsSpCounts;
-    private EnumCounters<StorageType> typeCounts;
+    private EnumCounters<Quota> nsSsCounts;
+    private EnumCounters<StorageType> tsCounts;
 
     public Builder() {
-      this.nsSpCounts = new EnumCounters<Quota>(Quota.class);
-      this.typeCounts = new EnumCounters<StorageType>(StorageType.class);
+      this.nsSsCounts = new EnumCounters<Quota>(Quota.class);
+      this.tsCounts = new EnumCounters<StorageType>(StorageType.class);
     }
 
-    public Builder nameCount(long val) {
-      this.nsSpCounts.set(Quota.NAMESPACE, val);
+    public Builder nameSpace(long val) {
+      this.nsSsCounts.set(Quota.NAMESPACE, val);
       return this;
     }
 
-    public Builder spaceCount(long val) {
-      this.nsSpCounts.set(Quota.DISKSPACE, val);
+    public Builder storageSpace(long val) {
+      this.nsSsCounts.set(Quota.STORAGESPACE, val);
       return this;
     }
 
-    public Builder typeCounts(EnumCounters<StorageType> val) {
+    public Builder typeSpaces(EnumCounters<StorageType> val) {
       if (val != null) {
-        this.typeCounts.set(val);
+        this.tsCounts.set(val);
       }
       return this;
     }
 
-    public Builder typeCounts(long val) {
-      this.typeCounts.reset(val);
+    public Builder typeSpaces(long val) {
+      this.tsCounts.reset(val);
       return this;
     }
 
     public Builder quotaCount(QuotaCounts that) {
-      this.nsSpCounts.set(that.nsSpCounts);
-      this.typeCounts.set(that.typeCounts);
+      this.nsSsCounts.set(that.nsSsCounts);
+      this.tsCounts.set(that.tsCounts);
       return this;
     }
 
@@ -72,18 +74,18 @@ public class QuotaCounts {
   }
 
   private QuotaCounts(Builder builder) {
-    this.nsSpCounts = builder.nsSpCounts;
-    this.typeCounts = builder.typeCounts;
+    this.nsSsCounts = builder.nsSsCounts;
+    this.tsCounts = builder.tsCounts;
   }
 
   public void add(QuotaCounts that) {
-    this.nsSpCounts.add(that.nsSpCounts);
-    this.typeCounts.add(that.typeCounts);
+    this.nsSsCounts.add(that.nsSsCounts);
+    this.tsCounts.add(that.tsCounts);
   }
 
   public void subtract(QuotaCounts that) {
-    this.nsSpCounts.subtract(that.nsSpCounts);
-    this.typeCounts.subtract(that.typeCounts);
+    this.nsSsCounts.subtract(that.nsSsCounts);
+    this.tsCounts.subtract(that.tsCounts);
   }
 
   /**
@@ -93,70 +95,66 @@ public class QuotaCounts {
    */
   public QuotaCounts negation() {
     QuotaCounts ret = new QuotaCounts.Builder().quotaCount(this).build();
-    ret.nsSpCounts.negation();
-    ret.typeCounts.negation();
+    ret.nsSsCounts.negation();
+    ret.tsCounts.negation();
     return ret;
   }
 
   public long getNameSpace(){
-    return nsSpCounts.get(Quota.NAMESPACE);
+    return nsSsCounts.get(Quota.NAMESPACE);
   }
 
   public void setNameSpace(long nameSpaceCount) {
-    this.nsSpCounts.set(Quota.NAMESPACE, nameSpaceCount);
+    this.nsSsCounts.set(Quota.NAMESPACE, nameSpaceCount);
   }
 
   public void addNameSpace(long nsDelta) {
-    this.nsSpCounts.add(Quota.NAMESPACE, nsDelta);
+    this.nsSsCounts.add(Quota.NAMESPACE, nsDelta);
   }
 
-  public long getDiskSpace(){
-    return nsSpCounts.get(Quota.DISKSPACE);
+  public long getStorageSpace(){
+    return nsSsCounts.get(Quota.STORAGESPACE);
   }
 
-  public void setDiskSpace(long spaceCount) {
-    this.nsSpCounts.set(Quota.DISKSPACE, spaceCount);
+  public void setStorageSpace(long spaceCount) {
+    this.nsSsCounts.set(Quota.STORAGESPACE, spaceCount);
   }
 
-  public void addDiskSpace(long dsDelta) {
-    this.nsSpCounts.add(Quota.DISKSPACE, dsDelta);
+  public void addStorageSpace(long dsDelta) {
+    this.nsSsCounts.add(Quota.STORAGESPACE, dsDelta);
   }
 
   public EnumCounters<StorageType> getTypeSpaces() {
     EnumCounters<StorageType> ret =
         new EnumCounters<StorageType>(StorageType.class);
-    ret.set(typeCounts);
+    ret.set(tsCounts);
     return ret;
   }
 
   void setTypeSpaces(EnumCounters<StorageType> that) {
     if (that != null) {
-      this.typeCounts.set(that);
+      this.tsCounts.set(that);
     }
   }
 
   long getTypeSpace(StorageType type) {
-    return this.typeCounts.get(type);
+    return this.tsCounts.get(type);
   }
 
   void setTypeSpace(StorageType type, long spaceCount) {
-    this.typeCounts.set(type, spaceCount);
+    this.tsCounts.set(type, spaceCount);
   }
 
   public void addTypeSpace(StorageType type, long delta) {
-    this.typeCounts.add(type, delta);
-  }
-
-  public void addTypeSpaces(EnumCounters<StorageType> deltas) {
-    this.typeCounts.add(deltas);
+    this.tsCounts.add(type, delta);
   }
 
-  public boolean anyNsSpCountGreaterOrEqual(long val) {
-    return nsSpCounts.anyGreaterOrEqual(val);
+  public boolean anyNsSsCountGreaterOrEqual(long val) {
+    return nsSsCounts.anyGreaterOrEqual(val);
   }
 
-  public boolean anyTypeCountGreaterOrEqual(long val) {
-    return typeCounts.anyGreaterOrEqual(val);
+  public boolean anyTypeSpaceCountGreaterOrEqual(long val) {
+    return tsCounts.anyGreaterOrEqual(val);
   }
 
   @Override
@@ -167,8 +165,8 @@ public class QuotaCounts {
       return false;
     }
     final QuotaCounts that = (QuotaCounts)obj;
-    return this.nsSpCounts.equals(that.nsSpCounts)
-        && this.typeCounts.equals(that.typeCounts);
+    return this.nsSsCounts.equals(that.nsSsCounts)
+        && this.tsCounts.equals(that.tsCounts);
   }
 
   @Override

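A short sketch of the renamed builder and accessor pairs working together, in the same delta style the snapshot code below uses; the values here are hypothetical:

import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;

// Sketch only; not part of this patch.
public class QuotaDeltaSketch {
  public static void main(String[] args) {
    QuotaCounts before = new QuotaCounts.Builder()
        .nameSpace(3).storageSpace(384L * 1024 * 1024).build();
    QuotaCounts after = new QuotaCounts.Builder()
        .nameSpace(2).storageSpace(256L * 1024 * 1024).build();

    // quotaCount() copies both the namespace/storagespace counters and the
    // storage type counters from an existing QuotaCounts instance.
    QuotaCounts delta = new QuotaCounts.Builder().quotaCount(before).build();
    delta.subtract(after);

    System.out.println(delta.getNameSpace());     // 1
    System.out.println(delta.getStorageSpace());  // 134217728 (128 MB)
  }
}
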
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 6bcdbd7..7d1edbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -144,7 +144,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
   public QuotaCounts updateQuotaAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile file,
       FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes) {
-    long oldDiskspace = file.diskspaceConsumed();
+    long oldStoragespace = file.storagespaceConsumed();
 
     byte storagePolicyID = file.getStoragePolicyID();
     BlockStoragePolicy bsp = null;
@@ -159,7 +159,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
       short currentRepl = file.getBlockReplication();
       if (currentRepl == 0) {
         long oldFileSizeNoRep = file.computeFileSize(true, true);
-        oldDiskspace =  oldFileSizeNoRep * replication;
+        oldStoragespace =  oldFileSizeNoRep * replication;
 
         if (bsp != null) {
           List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
@@ -170,8 +170,8 @@ public class FileWithSnapshotFeature implements INode.Feature {
           }
         }
       } else if (replication > currentRepl) {
-        long oldFileSizeNoRep = file.diskspaceConsumedNoReplication();
-        oldDiskspace = oldFileSizeNoRep * replication;
+        long oldFileSizeNoRep = file.storagespaceConsumedNoReplication();
+        oldStoragespace = oldFileSizeNoRep * replication;
 
         if (bsp != null) {
           List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
@@ -197,10 +197,10 @@ public class FileWithSnapshotFeature implements INode.Feature {
     getDiffs().combineAndCollectSnapshotBlocks(
         bsps, file, removed, collectedBlocks, removedINodes);
 
-    long dsDelta = oldDiskspace - file.diskspaceConsumed();
+    long ssDelta = oldStoragespace - file.storagespaceConsumed();
     return new QuotaCounts.Builder().
-        spaceCount(dsDelta).
-        typeCounts(typeSpaces).
+        storageSpace(ssDelta).
+        typeSpaces(typeSpaces).
         build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index bd6f76c..82709a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -571,7 +571,7 @@ message GetContentSummaryResponseProto {
 message SetQuotaRequestProto {
   required string path = 1;
   required uint64 namespaceQuota = 2;
-  required uint64 diskspaceQuota = 3;
+  required uint64 storagespaceQuota = 3;
   optional StorageTypeProto storageType = 4;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index ede2f89..281ffb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -84,7 +84,7 @@ public class TestDiskspaceQuotaUpdate {
     QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
         .getSpaceConsumed();
     assertEquals(2, cnt.getNameSpace());
-    assertEquals(fileLen * REPLICATION, cnt.getDiskSpace());
+    assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
   }
 
   /**
@@ -108,7 +108,7 @@ public class TestDiskspaceQuotaUpdate {
     QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed();
     long ns = quota.getNameSpace();
-    long ds = quota.getDiskSpace();
+    long ds = quota.getStorageSpace();
     assertEquals(2, ns); // foo and bar
     assertEquals(currentFileLen * REPLICATION, ds);
     ContentSummary c = dfs.getContentSummary(foo);
@@ -120,7 +120,7 @@ public class TestDiskspaceQuotaUpdate {
 
     quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     ns = quota.getNameSpace();
-    ds = quota.getDiskSpace();
+    ds = quota.getStorageSpace();
     assertEquals(2, ns); // foo and bar
     assertEquals(currentFileLen * REPLICATION, ds);
     c = dfs.getContentSummary(foo);
@@ -132,7 +132,7 @@ public class TestDiskspaceQuotaUpdate {
 
     quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     ns = quota.getNameSpace();
-    ds = quota.getDiskSpace();
+    ds = quota.getStorageSpace();
     assertEquals(2, ns); // foo and bar
     assertEquals(currentFileLen * REPLICATION, ds);
     c = dfs.getContentSummary(foo);
@@ -159,7 +159,7 @@ public class TestDiskspaceQuotaUpdate {
     QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed();
     long ns = quota.getNameSpace();
-    long ds = quota.getDiskSpace();
+    long ds = quota.getStorageSpace();
     assertEquals(2, ns); // foo and bar
     assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction
 
@@ -169,7 +169,7 @@ public class TestDiskspaceQuotaUpdate {
     fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
     quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     ns = quota.getNameSpace();
-    ds = quota.getDiskSpace();
+    ds = quota.getStorageSpace();
     assertEquals(2, ns);
     assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
 
@@ -178,7 +178,7 @@ public class TestDiskspaceQuotaUpdate {
 
     quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
     ns = quota.getNameSpace();
-    ds = quota.getDiskSpace();
+    ds = quota.getStorageSpace();
     assertEquals(2, ns); // foo and bar
     assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
index 57f026d..c69b40c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
@@ -424,14 +424,14 @@ public class TestQuotaByStorageType {
     QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
         .getSpaceConsumed();
     assertEquals(2, cnt.getNameSpace());
-    assertEquals(fileLen * REPLICATION, cnt.getDiskSpace());
+    assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
 
     dfs.delete(createdFile, true);
 
     QuotaCounts cntAfterDelete = fnode.asDirectory().getDirectoryWithQuotaFeature()
         .getSpaceConsumed();
     assertEquals(1, cntAfterDelete.getNameSpace());
-    assertEquals(0, cntAfterDelete.getDiskSpace());
+    assertEquals(0, cntAfterDelete.getStorageSpace());
 
     // Validate the computeQuotaUsage()
     QuotaCounts counts = new QuotaCounts.Builder().build();
@@ -439,7 +439,7 @@ public class TestQuotaByStorageType {
     assertEquals(fnode.dumpTreeRecursively().toString(), 1,
         counts.getNameSpace());
     assertEquals(fnode.dumpTreeRecursively().toString(), 0,
-        counts.getDiskSpace());
+        counts.getStorageSpace());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 9edeafd..a215bee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -1200,13 +1200,13 @@ public class TestRenameWithSnapshots {
     // make sure the whole referred subtree has been destroyed
     QuotaCounts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
     assertEquals(3, q.getNameSpace());
-    assertEquals(0, q.getDiskSpace());
+    assertEquals(0, q.getStorageSpace());
     
     hdfs.deleteSnapshot(sdir1, "s1");
     restartClusterAndCheckImage(true);
     q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
     assertEquals(3, q.getNameSpace());
-    assertEquals(0, q.getDiskSpace());
+    assertEquals(0, q.getStorageSpace());
   }
   
   /**
@@ -1602,7 +1602,7 @@ public class TestRenameWithSnapshots {
     assertTrue(dir2Node.isSnapshottable());
     QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
     assertEquals(2, counts.getNameSpace());
-    assertEquals(0, counts.getDiskSpace());
+    assertEquals(0, counts.getStorageSpace());
     childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
         .getChildrenList(Snapshot.CURRENT_STATE_ID));
     assertEquals(1, childrenList.size());
@@ -1676,7 +1676,7 @@ public class TestRenameWithSnapshots {
     assertTrue(dir2Node.isSnapshottable());
     QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
     assertEquals(3, counts.getNameSpace());
-    assertEquals(0, counts.getDiskSpace());
+    assertEquals(0, counts.getStorageSpace());
     childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
         .getChildrenList(Snapshot.CURRENT_STATE_ID));
     assertEquals(1, childrenList.size());
@@ -1793,7 +1793,7 @@ public class TestRenameWithSnapshots {
     QuotaCounts counts = dir2Node.computeQuotaUsage(
         fsdir.getBlockStoragePolicySuite());
     assertEquals(4, counts.getNameSpace());
-    assertEquals(BLOCKSIZE * REPL * 2, counts.getDiskSpace());
+    assertEquals(BLOCKSIZE * REPL * 2, counts.getStorageSpace());
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6aa6cbf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index c494322..a679183 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -207,13 +207,13 @@ public class TestSnapshotDeletion {
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
         q.getNameSpace());
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
-        q.getDiskSpace());
+        q.getStorageSpace());
     QuotaCounts counts = new QuotaCounts.Builder().build();
     dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), counts, false);
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
         counts.getNameSpace());
     assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
-        counts.getDiskSpace());
+        counts.getStorageSpace());
   }
   
   /**