Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/03/27 07:34:45 UTC

[43/50] [abbrv] hadoop git commit: HDFS-7824. GetContentSummary API and its namenode implementation for Storage Type Quota/Usage. (Contributed by Xiaoyu Yao)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd1081b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd1081b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd1081b3

Branch: refs/heads/YARN-2928
Commit: bd1081b3ca08692c67c533c2c190c8dd6cfaf400
Parents: b1ab3f2
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Mar 26 10:24:11 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:49 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/ContentSummary.java    | 155 ++++++++++++++++++-
 .../java/org/apache/hadoop/fs/FileContext.java  |  15 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  12 +-
 .../apache/hadoop/fs/TestContentSummary.java    |  52 ++++---
 .../org/apache/hadoop/fs/shell/TestCount.java   |   2 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  14 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  42 ++++-
 .../server/blockmanagement/BlockCollection.java |   2 +-
 .../server/blockmanagement/BlockManager.java    |   2 +-
 .../hdfs/server/namenode/ContentCounts.java     | 146 +++++++++++++++++
 .../ContentSummaryComputationContext.java       |  25 +--
 .../namenode/DirectoryWithQuotaFeature.java     |   6 +-
 .../hadoop/hdfs/server/namenode/INode.java      |  23 +--
 .../hdfs/server/namenode/INodeDirectory.java    |   2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  28 +++-
 .../hdfs/server/namenode/INodeReference.java    |   4 +-
 .../hdfs/server/namenode/INodeSymlink.java      |   2 +-
 .../snapshot/DirectorySnapshottableFeature.java |   4 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   9 +-
 .../apache/hadoop/hdfs/util/EnumCounters.java   |   6 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |   5 +-
 .../hadoop-hdfs/src/main/proto/hdfs.proto       |  14 ++
 .../blockmanagement/TestReplicationPolicy.java  |   2 +-
 .../server/namenode/TestQuotaByStorageType.java |  80 ++++++++++
 25 files changed, 563 insertions(+), 92 deletions(-)
----------------------------------------------------------------------
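
In brief, this commit extends ContentSummary with per-storage-type consumed
space and quota, adds a ContentSummary.Builder to construct it, and wires the
new fields through the namenode's content summary computation and the protobuf
layer. As a rough sketch of client-side usage (the "/data" path and the report
format are hypothetical; the accessors are the ones added in this commit):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.StorageType;

  public class StorageTypeUsageReport {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      ContentSummary cs = fs.getContentSummary(new Path("/data"));
      System.out.println("spaceConsumed=" + cs.getSpaceConsumed()
          + " spaceQuota=" + cs.getSpaceQuota());
      for (StorageType t : StorageType.getTypesSupportingQuota()) {
        // getTypeQuota() returns -1 when no quota is set for the type;
        // getTypeConsumed() returns 0 when nothing is stored on it.
        System.out.println(t + ": consumed=" + cs.getTypeConsumed(t)
            + " quota=" + cs.getTypeQuota(t));
      }
    }
  }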


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 6276dda..66137d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
@@ -36,17 +37,106 @@ public class ContentSummary implements Writable{
   private long quota;
   private long spaceConsumed;
   private long spaceQuota;
-  
+  private long typeConsumed[];
+  private long typeQuota[];
+
+  public static class Builder {
+    public Builder() {
+      this.quota = -1;
+      this.spaceQuota = -1;
+
+      typeConsumed = new long[StorageType.values().length];
+      typeQuota = new long[StorageType.values().length];
+      for (int i = 0; i < typeQuota.length; i++) {
+        typeQuota[i] = -1;
+      }
+    }
+
+    public Builder length(long length) {
+      this.length = length;
+      return this;
+    }
+
+    public Builder fileCount(long fileCount) {
+      this.fileCount = fileCount;
+      return this;
+    }
+
+    public Builder directoryCount(long directoryCount) {
+      this.directoryCount = directoryCount;
+      return this;
+    }
+
+    public Builder quota(long quota) {
+      this.quota = quota;
+      return this;
+    }
+
+    public Builder spaceConsumed(long spaceConsumed) {
+      this.spaceConsumed = spaceConsumed;
+      return this;
+    }
+
+    public Builder spaceQuota(long spaceQuota) {
+      this.spaceQuota = spaceQuota;
+      return this;
+    }
+
+    public Builder typeConsumed(long typeConsumed[]) {
+      for (int i = 0; i < typeConsumed.length; i++) {
+        this.typeConsumed[i] = typeConsumed[i];
+      }
+      return this;
+    }
+
+    public Builder typeQuota(StorageType type, long quota) {
+      this.typeQuota[type.ordinal()] = quota;
+      return this;
+    }
+
+    public Builder typeConsumed(StorageType type, long consumed) {
+      this.typeConsumed[type.ordinal()] = consumed;
+      return this;
+    }
+
+    public Builder typeQuota(long typeQuota[]) {
+      for (int i = 0; i < typeQuota.length; i++) {
+        this.typeQuota[i] = typeQuota[i];
+      }
+      return this;
+    }
+
+    public ContentSummary build() {
+      return new ContentSummary(length, fileCount, directoryCount, quota,
+          spaceConsumed, spaceQuota, typeConsumed, typeQuota);
+    }
+
+    private long length;
+    private long fileCount;
+    private long directoryCount;
+    private long quota;
+    private long spaceConsumed;
+    private long spaceQuota;
+    private long typeConsumed[];
+    private long typeQuota[];
+  }
 
-  /** Constructor */
+  /** Constructor, deprecated by ContentSummary.Builder. */
+  @Deprecated
   public ContentSummary() {}
   
-  /** Constructor */
+  /** Constructor, deprecated by ContentSummary.Builder.
+   *  This constructor implicitly sets spaceConsumed to the same value as
+   *  length. With ContentSummary.Builder, spaceConsumed and length must be
+   *  set explicitly.
+   */
+  @Deprecated
   public ContentSummary(long length, long fileCount, long directoryCount) {
     this(length, fileCount, directoryCount, -1L, length, -1L);
   }
 
-  /** Constructor */
+  /** Constructor, deprecated by ContentSummary.Builder */
+  @Deprecated
   public ContentSummary(
       long length, long fileCount, long directoryCount, long quota,
       long spaceConsumed, long spaceQuota) {
@@ -58,6 +148,21 @@ public class ContentSummary implements Writable{
     this.spaceQuota = spaceQuota;
   }
 
+  /** Constructor for ContentSummary.Builder. */
+  private ContentSummary(
+      long length, long fileCount, long directoryCount, long quota,
+      long spaceConsumed, long spaceQuota, long typeConsumed[],
+      long typeQuota[]) {
+    this.length = length;
+    this.fileCount = fileCount;
+    this.directoryCount = directoryCount;
+    this.quota = quota;
+    this.spaceConsumed = spaceConsumed;
+    this.spaceQuota = spaceQuota;
+    this.typeConsumed = typeConsumed;
+    this.typeQuota = typeQuota;
+  }
+
   /** @return the length */
   public long getLength() {return length;}
 
@@ -70,12 +175,48 @@ public class ContentSummary implements Writable{
   /** Return the directory quota */
   public long getQuota() {return quota;}
   
-  /** Retuns (disk) space consumed */ 
+  /** Returns storage space consumed */
   public long getSpaceConsumed() {return spaceConsumed;}
 
-  /** Returns (disk) space quota */
+  /** Returns storage space quota */
   public long getSpaceQuota() {return spaceQuota;}
-  
+
+  /** Returns the quota for the given storage type */
+  public long getTypeQuota(StorageType type) {
+    return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
+  }
+
+  /** Returns the space consumed for the given storage type */
+  public long getTypeConsumed(StorageType type) {
+    return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
+  }
+
+  /** Returns true if any storage type quota has been set */
+  public boolean isTypeQuotaSet() {
+    if (typeQuota == null) {
+      return false;
+    }
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      if (typeQuota[t.ordinal()] > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /** Returns true if any storage type consumption information is available */
+  public boolean isTypeConsumedAvailable() {
+    if (typeConsumed == null) {
+      return false;
+    }
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      if (typeConsumed[t.ordinal()] > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   @Override
   @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {
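
As an illustration of the Builder added above, a minimal sketch with made-up
values (with an SSD quota set, isTypeQuotaSet() and isTypeConsumedAvailable()
on the result both return true):

  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.StorageType;

  public class ContentSummaryBuilderSketch {
    static ContentSummary sample() {
      return new ContentSummary.Builder().
          length(1024).fileCount(1).directoryCount(0).
          spaceConsumed(3 * 1024).              // e.g. replication factor 3
          typeConsumed(StorageType.SSD, 1024).  // one replica on SSD
          typeQuota(StorageType.SSD, 4 * 1024).
          build();
    }
  }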

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 2713144..aad8be9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -1644,20 +1644,27 @@ public class FileContext {
         UnsupportedFileSystemException, IOException {
       FileStatus status = FileContext.this.getFileStatus(f);
       if (status.isFile()) {
-        return new ContentSummary(status.getLen(), 1, 0);
+        long length = status.getLen();
+        return new ContentSummary.Builder().length(length).
+            fileCount(1).directoryCount(0).spaceConsumed(length).
+            build();
       }
       long[] summary = {0, 0, 1};
-      RemoteIterator<FileStatus> statusIterator = 
+      RemoteIterator<FileStatus> statusIterator =
         FileContext.this.listStatus(f);
       while(statusIterator.hasNext()) {
         FileStatus s = statusIterator.next();
+        long length = s.getLen();
         ContentSummary c = s.isDirectory() ? getContentSummary(s.getPath()) :
-                                       new ContentSummary(s.getLen(), 1, 0);
+            new ContentSummary.Builder().length(length).fileCount(1).
+            directoryCount(0).spaceConsumed(length).build();
         summary[0] += c.getLength();
         summary[1] += c.getFileCount();
         summary[2] += c.getDirectoryCount();
       }
-      return new ContentSummary(summary[0], summary[1], summary[2]);
+      return new ContentSummary.Builder().length(summary[0]).
+          fileCount(summary[1]).directoryCount(summary[2]).
+          spaceConsumed(summary[0]).build();
     }
     
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 2ca8813..305fef2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1467,18 +1467,24 @@ public abstract class FileSystem extends Configured implements Closeable {
     FileStatus status = getFileStatus(f);
     if (status.isFile()) {
       // f is a file
-      return new ContentSummary(status.getLen(), 1, 0);
+      long length = status.getLen();
+      return new ContentSummary.Builder().length(length).
+          fileCount(1).directoryCount(0).spaceConsumed(length).build();
     }
     // f is a directory
     long[] summary = {0, 0, 1};
     for(FileStatus s : listStatus(f)) {
+      long length = s.getLen();
       ContentSummary c = s.isDirectory() ? getContentSummary(s.getPath()) :
-                                     new ContentSummary(s.getLen(), 1, 0);
+          new ContentSummary.Builder().length(length).
+          fileCount(1).directoryCount(0).spaceConsumed(length).build();
       summary[0] += c.getLength();
       summary[1] += c.getFileCount();
       summary[2] += c.getDirectoryCount();
     }
-    return new ContentSummary(summary[0], summary[1], summary[2]);
+    return new ContentSummary.Builder().length(summary[0]).
+        fileCount(summary[1]).directoryCount(summary[2]).
+        spaceConsumed(summary[0]).build();
   }
 
   final private static PathFilter DEFAULT_FILTER = new PathFilter() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
index 5db0de3..7cc7ae4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
@@ -32,13 +32,13 @@ public class TestContentSummary {
   // check the empty constructor correctly initialises the object
   @Test
   public void testConstructorEmpty() {
-    ContentSummary contentSummary = new ContentSummary();
+    ContentSummary contentSummary = new ContentSummary.Builder().build();
     assertEquals("getLength", 0, contentSummary.getLength());
     assertEquals("getFileCount", 0, contentSummary.getFileCount());
     assertEquals("getDirectoryCount", 0, contentSummary.getDirectoryCount());
-    assertEquals("getQuota", 0, contentSummary.getQuota());
+    assertEquals("getQuota", -1, contentSummary.getQuota());
     assertEquals("getSpaceConsumed", 0, contentSummary.getSpaceConsumed());
-    assertEquals("getSpaceQuota", 0, contentSummary.getSpaceQuota());
+    assertEquals("getSpaceQuota", -1, contentSummary.getSpaceQuota());
   }
 
   // check the full constructor with quota information
@@ -51,8 +51,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66666;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     assertEquals("getLength", length, contentSummary.getLength());
     assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
     assertEquals("getDirectoryCount", directoryCount,
@@ -70,8 +71,9 @@ public class TestContentSummary {
     long fileCount = 22222;
     long directoryCount = 33333;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).
+        spaceConsumed(length).build();
     assertEquals("getLength", length, contentSummary.getLength());
     assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
     assertEquals("getDirectoryCount", directoryCount,
@@ -91,8 +93,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66666;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
 
     DataOutput out = mock(DataOutput.class);
     InOrder inOrder = inOrder(out);
@@ -116,7 +119,7 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66666;
 
-    ContentSummary contentSummary = new ContentSummary();
+    ContentSummary contentSummary = new ContentSummary.Builder().build();
 
     DataInput in = mock(DataInput.class);
     when(in.readLong()).thenReturn(length).thenReturn(fileCount)
@@ -159,8 +162,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66665;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "       44444          -11111           66665           11110"
         + "        33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString(true));
@@ -173,8 +177,8 @@ public class TestContentSummary {
     long fileCount = 22222;
     long directoryCount = 33333;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).build();
     String expected = "        none             inf            none"
         + "             inf        33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString(true));
@@ -190,8 +194,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66665;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "       33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString(false));
   }
@@ -206,8 +211,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66665;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "       44444          -11111           66665"
         + "           11110        33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString());
@@ -223,8 +229,9 @@ public class TestContentSummary {
     long spaceConsumed = 1073741825;
     long spaceQuota = 1;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "     212.0 M            1023               1 "
         + "           -1 G       32.6 K      211.9 M              8.0 E ";
     assertEquals(expected, contentSummary.toString(true, true));
@@ -240,8 +247,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = Long.MAX_VALUE;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "      32.6 K      211.9 M              8.0 E ";
     assertEquals(expected, contentSummary.toString(false, true));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index 1f2f2d4..d5f097d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -315,6 +315,8 @@ public class TestCount {
   // mock content system
   static class MockContentSummary extends ContentSummary {
 
+    @SuppressWarnings("deprecation")
+    // suppress warning on the use of the deprecated ContentSummary constructor
     public MockContentSummary() {
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 20b212e..e797d12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -1013,13 +1013,13 @@ public class HttpFSFileSystem extends FileSystem
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) ((JSONObject)
       HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
-    return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_QUOTA_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)
-    );
+    return new ContentSummary.Builder().
+        length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)).
+        fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)).
+        directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)).
+        quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)).
+        spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)).
+        spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 51842ff..e16348a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1300,6 +1300,9 @@ Release 2.7.0 - UNRELEASED
       HDFS-7806. Refactor: move StorageType from hadoop-hdfs to
       hadoop-common. (Xiaoyu Yao via Arpit Agarwal)
 
+      HDFS-7824. GetContentSummary API and its namenode implementation for
+      Storage Type Quota/Usage. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b841850..9446b70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1728,21 +1728,49 @@ public class PBHelper {
   
   public static ContentSummary convert(ContentSummaryProto cs) {
     if (cs == null) return null;
-    return new ContentSummary(
-      cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
-      cs.getSpaceConsumed(), cs.getSpaceQuota());
+    ContentSummary.Builder builder = new ContentSummary.Builder();
+    builder.length(cs.getLength()).
+        fileCount(cs.getFileCount()).
+        directoryCount(cs.getDirectoryCount()).
+        quota(cs.getQuota()).
+        spaceConsumed(cs.getSpaceConsumed()).
+        spaceQuota(cs.getSpaceQuota());
+    if (cs.hasTypeQuotaInfos()) {
+      for (HdfsProtos.StorageTypeQuotaInfoProto info :
+          cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
+        StorageType type = PBHelper.convertStorageType(info.getType());
+        builder.typeConsumed(type, info.getConsumed());
+        builder.typeQuota(type, info.getQuota());
+      }
+    }
+    return builder.build();
   }
   
   public static ContentSummaryProto convert(ContentSummary cs) {
     if (cs == null) return null;
-    return ContentSummaryProto.newBuilder().
-        setLength(cs.getLength()).
+    ContentSummaryProto.Builder builder = ContentSummaryProto.newBuilder();
+        builder.setLength(cs.getLength()).
         setFileCount(cs.getFileCount()).
         setDirectoryCount(cs.getDirectoryCount()).
         setQuota(cs.getQuota()).
         setSpaceConsumed(cs.getSpaceConsumed()).
-        setSpaceQuota(cs.getSpaceQuota()).
-        build();
+        setSpaceQuota(cs.getSpaceQuota());
+
+    if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) {
+      HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
+          HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
+      for (StorageType t: StorageType.getTypesSupportingQuota()) {
+        HdfsProtos.StorageTypeQuotaInfoProto info =
+            HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
+                setType(convertStorageType(t)).
+                setConsumed(cs.getTypeConsumed(t)).
+                setQuota(cs.getTypeQuota(t)).
+                build();
+        isb.addTypeQuotaInfo(info);
+      }
+      builder.setTypeQuotaInfos(isb);
+    }
+    return builder.build();
   }
 
   public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 1547611..e9baf85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -36,7 +36,7 @@ public interface BlockCollection {
   /** 
    * Get content summary.
    */
-  public ContentSummary computeContentSummary();
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
    * @return the number of blocks

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 91cfead..ad40782 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -745,7 +745,7 @@ public class BlockManager {
         // always decrement total blocks
         -1);
 
-    final long fileLength = bc.computeContentSummary().getLength();
+    final long fileLength = bc.computeContentSummary(getStoragePolicySuite()).getLength();
     final long pos = fileLength - ucBlock.getNumBytes();
     return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
new file mode 100644
index 0000000..16f0771
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.util.EnumCounters;
+
+/**
+ * Counters to be computed for content types such as file, directory and
+ * symlink, and for storage type usage such as SSD, DISK and ARCHIVE.
+ */
+public class ContentCounts {
+  private EnumCounters<Content> contents;
+  private EnumCounters<StorageType> types;
+
+  public static class Builder {
+    private EnumCounters<Content> contents;
+    // storage spaces used by corresponding storage types
+    private EnumCounters<StorageType> types;
+
+    public Builder() {
+      contents = new EnumCounters<Content>(Content.class);
+      types = new EnumCounters<StorageType>(StorageType.class);
+    }
+
+    public Builder file(long file) {
+      contents.set(Content.FILE, file);
+      return this;
+    }
+
+    public Builder directory(long directory) {
+      contents.set(Content.DIRECTORY, directory);
+      return this;
+    }
+
+    public Builder symlink(long symlink) {
+      contents.set(Content.SYMLINK, symlink);
+      return this;
+    }
+
+    public Builder length(long length) {
+      contents.set(Content.LENGTH, length);
+      return this;
+    }
+
+    public Builder storagespace(long storagespace) {
+      contents.set(Content.DISKSPACE, storagespace);
+      return this;
+    }
+
+    public Builder snapshot(long snapshot) {
+      contents.set(Content.SNAPSHOT, snapshot);
+      return this;
+    }
+
+    public Builder snapshotable_directory(long snapshotable_directory) {
+      contents.set(Content.SNAPSHOTTABLE_DIRECTORY, snapshotable_directory);
+      return this;
+    }
+
+    public ContentCounts build() {
+      return new ContentCounts(contents, types);
+    }
+  }
+
+  private ContentCounts(EnumCounters<Content> contents,
+      EnumCounters<StorageType> types) {
+    this.contents = contents;
+    this.types = types;
+  }
+
+  // Get the number of files.
+  public long getFileCount() {
+    return contents.get(Content.FILE);
+  }
+
+  // Get the number of directories.
+  public long getDirectoryCount() {
+    return contents.get(Content.DIRECTORY);
+  }
+
+  // Get the number of symlinks.
+  public long getSymlinkCount() {
+    return contents.get(Content.SYMLINK);
+  }
+
+  // Get the total of file length in bytes.
+  public long getLength() {
+    return contents.get(Content.LENGTH);
+  }
+
+  // Get the total of storage space usage in bytes including replication.
+  public long getStoragespace() {
+    return contents.get(Content.DISKSPACE);
+  }
+
+  // Get the number of snapshots
+  public long getSnapshotCount() {
+    return contents.get(Content.SNAPSHOT);
+  }
+
+  // Get the number of snapshottable directories.
+  public long getSnapshotableDirectoryCount() {
+    return contents.get(Content.SNAPSHOTTABLE_DIRECTORY);
+  }
+
+  public long[] getTypeSpaces() {
+    return types.asArray();
+  }
+
+  public long getTypeSpace(StorageType t) {
+    return types.get(t);
+  }
+
+  public void addContent(Content c, long val) {
+    contents.add(c, val);
+  }
+
+  public void addContents(ContentCounts that) {
+    contents.add(that.contents);
+    types.add(that.types);
+  }
+
+  public void addTypeSpace(StorageType t, long val) {
+    types.add(t, val);
+  }
+
+  public void addTypeSpaces(EnumCounters<StorageType> that) {
+    this.types.add(that);
+  }
+}
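
ContentCounts replaces the former Content.Counts as the accumulator used while
computing a summary. A small sketch of how it aggregates (values illustrative;
Content is the existing namenode enum):

  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.server.namenode.Content;
  import org.apache.hadoop.hdfs.server.namenode.ContentCounts;

  public class ContentCountsSketch {
    static long[] tally() {
      ContentCounts counts = new ContentCounts.Builder().build();
      counts.addContent(Content.FILE, 1);              // one file...
      counts.addContent(Content.LENGTH, 1024);         // ...of 1024 bytes
      counts.addContent(Content.DISKSPACE, 3 * 1024);  // with replication 3
      counts.addTypeSpace(StorageType.SSD, 1024);      // one replica on SSD
      return counts.getTypeSpaces();  // indexed by StorageType.ordinal()
    }
  }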

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 63fa8c1..31f34b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -26,7 +27,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 public class ContentSummaryComputationContext {
   private FSDirectory dir = null;
   private FSNamesystem fsn = null;
-  private Content.Counts counts = null;
+  private BlockStoragePolicySuite bsps = null;
+  private ContentCounts counts = null;
   private long nextCountLimit = 0;
   private long limitPerRun = 0;
   private long yieldCount = 0;
@@ -46,12 +48,13 @@ public class ContentSummaryComputationContext {
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
-    this.counts = Content.Counts.newInstance();
+    this.counts = new ContentCounts.Builder().build();
   }
 
   /** Constructor for blocking computation. */
-  public ContentSummaryComputationContext() {
+  public ContentSummaryComputationContext(BlockStoragePolicySuite bsps) {
     this(null, null, 0);
+    this.bsps = bsps;
   }
 
   /** Return current yield count */
@@ -73,10 +76,10 @@ public class ContentSummaryComputationContext {
     }
 
     // Have we reached the limit?
-    long currentCount = counts.get(Content.FILE) +
-        counts.get(Content.SYMLINK) +
-        counts.get(Content.DIRECTORY) +
-        counts.get(Content.SNAPSHOTTABLE_DIRECTORY);
+    long currentCount = counts.getFileCount() +
+        counts.getSymlinkCount() +
+        counts.getDirectoryCount() +
+        counts.getSnapshotableDirectoryCount();
     if (currentCount <= nextCountLimit) {
       return false;
     }
@@ -114,11 +117,15 @@ public class ContentSummaryComputationContext {
   }
 
   /** Get the content counts */
-  public Content.Counts getCounts() {
+  public ContentCounts getCounts() {
     return counts;
   }
 
   public BlockStoragePolicySuite getBlockStoragePolicySuite() {
-      return fsn.getBlockManager().getStoragePolicySuite();
+    Preconditions.checkState((bsps != null || fsn != null),
+        "BlockStoragePolicySuite must be either initialized or available via" +
+            " FSNamesystem");
+    return (bsps != null) ? bsps :
+        fsn.getBlockManager().getStoragePolicySuite();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 01eb22f..31b45ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -126,12 +126,12 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
       final ContentSummaryComputationContext summary) {
-    final long original = summary.getCounts().get(Content.DISKSPACE);
+    final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
     // Check only when the content has not changed in the middle.
     if (oldYieldCount == summary.getYieldCount()) {
-      checkStoragespace(dir, summary.getCounts().get(Content.DISKSPACE) - original);
+      checkStoragespace(dir, summary.getCounts().getStoragespace() - original);
     }
     return summary;
   }
@@ -277,4 +277,4 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
     return "Quota[" + namespaceString() + ", " + storagespaceString() +
         ", " + typeSpaceString() + "]";
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 8c4e466..586cce4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -432,9 +432,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
       BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary() {
+  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
     return computeAndConvertContentSummary(
-        new ContentSummaryComputationContext());
+        new ContentSummaryComputationContext(bsps));
   }
 
   /**
@@ -442,17 +442,22 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    */
   public final ContentSummary computeAndConvertContentSummary(
       ContentSummaryComputationContext summary) {
-    Content.Counts counts = computeContentSummary(summary).getCounts();
+    ContentCounts counts = computeContentSummary(summary).getCounts();
     final QuotaCounts q = getQuotaCounts();
-    return new ContentSummary(counts.get(Content.LENGTH),
-        counts.get(Content.FILE) + counts.get(Content.SYMLINK),
-        counts.get(Content.DIRECTORY), q.getNameSpace(),
-        counts.get(Content.DISKSPACE), q.getStorageSpace());
-    // TODO: storage type quota reporting HDFS-7701.
+    return new ContentSummary.Builder().
+        length(counts.getLength()).
+        fileCount(counts.getFileCount() + counts.getSymlinkCount()).
+        directoryCount(counts.getDirectoryCount()).
+        quota(q.getNameSpace()).
+        spaceConsumed(counts.getStoragespace()).
+        spaceQuota(q.getStorageSpace()).
+        typeConsumed(counts.getTypeSpaces()).
+        typeQuota(q.getTypeSpaces().asArray()).
+        build();
   }
 
   /**
-   * Count subtree content summary with a {@link Content.Counts}.
+   * Count subtree content summary with a {@link ContentCounts}.
    *
    * @param summary the context object holding counts for the subtree.
    * @return The same objects as summary.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index ebb8ae4..dadb8c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -664,7 +664,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
     }
 
     // Increment the directory count for this directory.
-    summary.getCounts().add(Content.DIRECTORY, 1);
+    summary.getCounts().addContent(Content.DIRECTORY, 1);
     // Relinquish and reacquire locks if necessary.
     summary.yield();
     return summary;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index ae554fe..a6f07f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -599,22 +599,36 @@ public class INodeFile extends INodeWithAdditionalFields
   @Override
   public final ContentSummaryComputationContext computeContentSummary(
       final ContentSummaryComputationContext summary) {
-    final Content.Counts counts = summary.getCounts();
+    final ContentCounts counts = summary.getCounts();
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
+    long fileLen = 0;
     if (sf == null) {
-      counts.add(Content.LENGTH, computeFileSize());
-      counts.add(Content.FILE, 1);
+      fileLen = computeFileSize();
+      counts.addContent(Content.FILE, 1);
     } else {
       final FileDiffList diffs = sf.getDiffs();
       final int n = diffs.asList().size();
-      counts.add(Content.FILE, n);
+      counts.addContent(Content.FILE, n);
       if (n > 0 && sf.isCurrentFileDeleted()) {
-        counts.add(Content.LENGTH, diffs.getLast().getFileSize());
+        fileLen = diffs.getLast().getFileSize();
       } else {
-        counts.add(Content.LENGTH, computeFileSize());
+        fileLen = computeFileSize();
+      }
+    }
+    counts.addContent(Content.LENGTH, fileLen);
+    counts.addContent(Content.DISKSPACE, storagespaceConsumed());
+
+    if (getStoragePolicyID() != BlockStoragePolicySuite.ID_UNSPECIFIED) {
+      BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
+          getPolicy(getStoragePolicyID());
+      List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
+      for (StorageType t : storageTypes) {
+        if (!t.supportTypeQuota()) {
+          continue;
+        }
+        counts.addTypeSpace(t, fileLen);
       }
     }
-    counts.add(Content.DISKSPACE, storagespaceConsumed());
     return summary;
   }
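
The per-type accounting above also explains the arithmetic in the tests later
in this diff: with the ONE_SSD policy and replication 3, chooseStorageTypes()
places one replica on SSD and two on DISK, so a file of length L contributes L
to SSD and 2 * L to DISK. A sketch, assuming the default policy suite:

  import java.util.List;
  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
  import org.apache.hadoop.hdfs.protocol.HdfsConstants;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

  public class PolicyTypeSketch {
    static List<StorageType> oneSsdTypes() {
      BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
      BlockStoragePolicy oneSsd =
          suite.getPolicy(HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
      return oneSsd.chooseStorageTypes((short) 3);  // [SSD, DISK, DISK]
    }
  }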
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 911279a..eee50a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -512,7 +513,8 @@ public abstract class INodeReference extends INode {
       //only count storagespace for WithName
       final QuotaCounts q = new QuotaCounts.Builder().build();
       computeQuotaUsage(summary.getBlockStoragePolicySuite(), q, false, lastSnapshotId);
-      summary.getCounts().add(Content.DISKSPACE, q.getStorageSpace());
+      summary.getCounts().addContent(Content.DISKSPACE, q.getStorageSpace());
+      summary.getCounts().addTypeSpaces(q.getTypeSpaces());
       return summary;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index fe75687..120d0dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -102,7 +102,7 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   @Override
   public ContentSummaryComputationContext computeContentSummary(
       final ContentSummaryComputationContext summary) {
-    summary.getCounts().add(Content.SYMLINK, 1);
+    summary.getCounts().addContent(Content.SYMLINK, 1);
     return summary;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 5168f0b..fa1bf94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -237,8 +237,8 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
       final INodeDirectory snapshotRoot,
       final ContentSummaryComputationContext summary) {
     snapshotRoot.computeContentSummary(summary);
-    summary.getCounts().add(Content.SNAPSHOT, snapshotsByNames.size());
-    summary.getCounts().add(Content.SNAPSHOTTABLE_DIRECTORY, 1);
+    summary.getCounts().addContent(Content.SNAPSHOT, snapshotsByNames.size());
+    summary.getCounts().addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     return summary;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 07ff744..d55332f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;
 import org.apache.hadoop.hdfs.server.namenode.Content;
+import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -650,19 +651,19 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final Content.Counts counts) {
+      final ContentCounts counts) {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary = 
-        new ContentSummaryComputationContext();
+        new ContentSummaryComputationContext(bsps);
     for(DirectoryDiff d : diffs) {
       for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
         deleted.computeContentSummary(summary);
       }
     }
     // Add the counts from deleted trees.
-    counts.add(summary.getCounts());
+    counts.addContents(summary.getCounts());
     // Add the deleted directory count.
-    counts.add(Content.DIRECTORY, diffs.asList().size());
+    counts.addContent(Content.DIRECTORY, diffs.asList().size());
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
index 18f4bd6..86ba341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
@@ -21,6 +21,7 @@ import java.util.Arrays;
 import java.util.HashMap;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.lang.ArrayUtils;
 
 /**
  * Counters for an enum type.
@@ -64,6 +65,11 @@ public class EnumCounters<E extends Enum<E>> {
     return counters[e.ordinal()];
   }
 
+  /** @return a copy of the array of current counter values */
+  public long[] asArray() {
+    return ArrayUtils.clone(counters);
+  }
+
   /** Negate all counters. */
   public final void negation() {
     for(int i = 0; i < counters.length; i++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index ae9612f..d53bc31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -553,8 +553,9 @@ public class JsonUtil {
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 
-    return new ContentSummary(length, fileCount, directoryCount,
-        quota, spaceConsumed, spaceQuota);
+    return new ContentSummary.Builder().length(length).fileCount(fileCount).
+        directoryCount(directoryCount).quota(quota).spaceConsumed(spaceConsumed).
+        spaceQuota(spaceQuota).build();
   }
 
   /** Convert a MD5MD5CRC32FileChecksum to a Json string. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 2966e51..7d94f04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -134,6 +134,20 @@ message ContentSummaryProto {
   required uint64 quota = 4;
   required uint64 spaceConsumed = 5;
   required uint64 spaceQuota = 6;
+  optional StorageTypeQuotaInfosProto typeQuotaInfos = 7;
+}
+
+/**
+ * Storage type quota and usage information of a file or directory
+ */
+message StorageTypeQuotaInfosProto {
+  repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1;
+}
+
+message StorageTypeQuotaInfoProto {
+  required StorageTypeProto type = 1;
+  required uint64 quota = 2;
+  required uint64 consumed = 3;
 }
 
 /**
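
For reference, a sketch (not part of the commit) of building the extended
message through the generated protobuf classes, mirroring the PBHelper.convert
change earlier in this diff; the field values are illustrative:

  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

  public class ContentSummaryProtoSketch {
    static HdfsProtos.ContentSummaryProto sample() {
      HdfsProtos.StorageTypeQuotaInfoProto ssd =
          HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
              setType(HdfsProtos.StorageTypeProto.SSD).
              setQuota(4096).setConsumed(1024).build();
      return HdfsProtos.ContentSummaryProto.newBuilder().
          setLength(1024).setFileCount(1).setDirectoryCount(0).
          setQuota(-1).setSpaceConsumed(3072).setSpaceQuota(-1).
          setTypeQuotaInfos(HdfsProtos.StorageTypeQuotaInfosProto.newBuilder().
              addTypeQuotaInfo(ssd)).
          build();
    }
  }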

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 485cb9b..32fae45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1221,7 +1221,7 @@ public class TestReplicationPolicy {
     when(mbc.isUnderConstruction()).thenReturn(true);
     ContentSummary cs = mock(ContentSummary.class);
     when(cs.getLength()).thenReturn((long)1);
-    when(mbc.computeContentSummary()).thenReturn(cs);
+    when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs);
     info.setBlockCollection(mbc);
     bm.addBlockCollection(info, mbc);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
index aee756f..6d38937 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
@@ -24,6 +24,7 @@ package org.apache.hadoop.hdfs.server.namenode;
   import org.apache.commons.logging.Log;
   import org.apache.commons.logging.LogFactory;
   import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.fs.ContentSummary;
   import org.apache.hadoop.fs.Path;
   import org.apache.hadoop.fs.StorageType;
   import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -156,6 +157,11 @@ public class TestQuotaByStorageType {
     ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
     assertEquals(file1Len, ssdConsumed);
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
   }
 
   @Test(timeout = 60000)
@@ -192,6 +198,11 @@ public class TestQuotaByStorageType {
     fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true);
     assertEquals(fnode.dumpTreeRecursively().toString(), 0,
         counts.getTypeSpaces().get(StorageType.SSD));
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), 0);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), 0);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), 0);
   }
 
   @Test(timeout = 60000)
@@ -233,6 +244,11 @@ public class TestQuotaByStorageType {
     } catch (Throwable t) {
       LOG.info("Got expected exception ", t);
     }
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
   }
 
   /**
@@ -554,6 +570,11 @@ public class TestQuotaByStorageType {
     assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len,
         counts1.getTypeSpaces().get(StorageType.SSD));
 
+    ContentSummary cs1 = dfs.getContentSummary(sub1);
+    assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs1.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs1.getTypeConsumed(StorageType.DISK), file1Len * 2);
+
     // Delete the snapshot s1
     dfs.deleteSnapshot(sub1, "s1");
 
@@ -566,6 +587,11 @@ public class TestQuotaByStorageType {
     sub1Node.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts2, true);
     assertEquals(sub1Node.dumpTreeRecursively().toString(), 0,
         counts2.getTypeSpaces().get(StorageType.SSD));
+
+    ContentSummary cs2 = dfs.getContentSummary(sub1);
+    assertEquals(cs2.getSpaceConsumed(), 0);
+    assertEquals(cs2.getTypeConsumed(StorageType.SSD), 0);
+    assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0);
   }
 
   @Test(timeout = 60000)
@@ -601,6 +627,11 @@ public class TestQuotaByStorageType {
     ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
     assertEquals(newFile1Len, ssdConsumed);
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2);
   }
 
   @Test
@@ -701,6 +732,55 @@ public class TestQuotaByStorageType {
         .getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
     assertEquals(file1Len, ssdConsumedAfterNNRestart);
+  }
+
+  @Test(timeout = 60000)
+  public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
+    final Path foo = new Path(dir, "foo");
+    Path createdFile1 = new Path(foo, "created_file1.data");
+    dfs.mkdirs(foo);
+
+    // set storage policy on directory "foo" to ONESSD
+    dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
 
+    INode fnode = fsdir.getINode4Write(foo.toString());
+    assertTrue(fnode.isDirectory());
+    assertTrue(!fnode.isQuotaSet());
+
+    // Create file of size 2 * BLOCKSIZE under directory "foo"
+    long file1Len = BLOCKSIZE * 2;
+    int bufLen = BLOCKSIZE / 16;
+    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
+
+    // Verify getContentSummary without any quota set
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
+  }
+
+  @Test(timeout = 60000)
+  public void testContentSummaryWithoutStoragePolicy() throws Exception {
+    final Path foo = new Path(dir, "foo");
+    Path createdFile1 = new Path(foo, "created_file1.data");
+    dfs.mkdirs(foo);
+
+    INode fnode = fsdir.getINode4Write(foo.toString());
+    assertTrue(fnode.isDirectory());
+    assertTrue(!fnode.isQuotaSet());
+
+    // Create file of size 2 * BLOCKSIZE under directory "foo"
+    long file1Len = BLOCKSIZE * 2;
+    int bufLen = BLOCKSIZE / 16;
+    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
+
+    // Verify getContentSummary without any quota set
+    // Expect no type quota and usage information available
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    for (StorageType t : StorageType.values()) {
+      assertEquals(cs.getTypeConsumed(t), 0);
+      assertEquals(cs.getTypeQuota(t), -1);
+    }
   }
 }
\ No newline at end of file