Posted to common-commits@hadoop.apache.org by xk...@apache.org on 2019/08/08 17:31:42 UTC

[hadoop] branch branch-2 updated: HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new ce12c8f  HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.
ce12c8f is described below

commit ce12c8fc54879ac985e73b104868495c3cdc80f2
Author: Erik Krogen <ek...@linkedin.com>
AuthorDate: Thu Aug 8 10:18:00 2019 -0700

    HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.
---
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |  55 ++++++--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  |  15 ++
 .../hadoop/hdfs/web/resources/GetOpParam.java      |   1 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java    |  86 ++++++++++--
 .../apache/hadoop/fs/http/server/FSOperations.java |  84 +++++++++++-
 .../fs/http/server/HttpFSParametersProvider.java   |   1 +
 .../apache/hadoop/fs/http/server/HttpFSServer.java |   8 ++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  |  76 +++++++++--
 .../web/resources/NamenodeWebHdfsMethods.java      |   7 +
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  50 +++++--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md       | 151 +++++++++++++++++++++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java    |  42 ++++++
 12 files changed, 522 insertions(+), 54 deletions(-)
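
For orientation (not part of the commit itself): once this change is in place, a
WebHDFS client picks up the new operation transparently through the standard
FileSystem API. A minimal sketch, assuming a branch-2 deployment with WebHDFS
enabled on the NameNode HTTP port; the class name, host, port, and path below
are hypothetical placeholders:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.QuotaUsage;

    public class QuotaUsageExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical NameNode address; branch-2 serves WebHDFS on the
        // NameNode HTTP port (50070 by default).
        URI uri = URI.create("webhdfs://namenode.example.com:50070");
        try (FileSystem fs = FileSystem.get(uri, new Configuration())) {
          // Resolves to WebHdfsFileSystem; with this commit the call is
          // served by op=GETQUOTAUSAGE rather than falling back to the
          // more expensive GETCONTENTSUMMARY default.
          QuotaUsage usage = fs.getQuotaUsage(new Path("/user/example"));
          System.out.println("fileAndDirectoryCount = "
              + usage.getFileAndDirectoryCount());
          System.out.println("quota         = " + usage.getQuota());
          System.out.println("spaceConsumed = " + usage.getSpaceConsumed());
          System.out.println("spaceQuota    = " + usage.getSpaceQuota());
        }
      }
    }

The HttpFS gateway gains the same operation (see the FSOperations and
HttpFSServer changes below), so the raw endpoint can also be exercised
directly, e.g. (host, path, and user are placeholders; 14000 is the HttpFS
default port):

    curl -i "http://<HTTPFS_HOST>:14000/webhdfs/v1/<PATH>?op=GETQUOTAUSAGE&user.name=<USER>"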

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 0320614..1fb7dea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -389,25 +390,61 @@ class JsonUtilClient {
     final long length = ((Number) m.get("length")).longValue();
     final long fileCount = ((Number) m.get("fileCount")).longValue();
     final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+    ContentSummary.Builder builder = new ContentSummary.Builder()
+        .length(length)
+        .fileCount(fileCount)
+        .directoryCount(directoryCount);
+    builder = buildQuotaUsage(builder, m, ContentSummary.Builder.class);
+    return builder.build();
+  }
+
+  /** Convert a JSON map to a QuotaUsage. */
+  static QuotaUsage toQuotaUsage(final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>) json.get(QuotaUsage.class.getSimpleName());
+    QuotaUsage.Builder builder = new QuotaUsage.Builder();
+    builder = buildQuotaUsage(builder, m, QuotaUsage.Builder.class);
+    return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided map and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
+      T builder, Map<?, ?> m, Class<T> type) {
     final long quota = ((Number) m.get("quota")).longValue();
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
     final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
 
-    Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
-        .fileCount(fileCount).directoryCount(directoryCount).quota(quota)
-        .spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+    T result = type.cast(builder
+        .quota(quota)
+        .spaceConsumed(spaceConsumed)
+        .spaceQuota(spaceQuota));
+
+    // ContentSummary doesn't set this, so check before using it.
+    if (m.get("fileAndDirectoryCount") != null) {
+      final long fileAndDirectoryCount =
+          ((Number) m.get("fileAndDirectoryCount")).longValue();
+      result = type.cast(result.fileAndDirectoryCount(fileAndDirectoryCount));
+    }
+
     if (typem != null) {
       for (StorageType t : StorageType.getTypesSupportingQuota()) {
-        Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
-        if (type != null) {
-          contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
-              ((Number) type.get("quota")).longValue()).typeConsumed(t,
-              ((Number) type.get("consumed")).longValue());
+        Map<?, ?> typeQuota = (Map<?, ?>) typem.get(t.toString());
+        if (typeQuota != null) {
+          result = type.cast(result.typeQuota(t,
+              ((Number) typeQuota.get("quota")).longValue()).typeConsumed(t,
+              ((Number) typeQuota.get("consumed")).longValue()));
         }
       }
     }
-    return contentSummaryBuilder.build();
+
+    return result;
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 6bbbf13..7d30967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
@@ -1846,6 +1847,20 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
+  public QuotaUsage getQuotaUsage(final Path p) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_QUOTA_USAGE);
+
+    final HttpOpParam.Op op = GetOpParam.Op.GETQUOTAUSAGE;
+    return new FsPathResponseRunner<QuotaUsage>(op, p) {
+      @Override
+      QuotaUsage decodeResponse(Map<?, ?> json) {
+        return JsonUtilClient.toQuotaUsage(json);
+      }
+    }.run();
+  }
+
+  @Override
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
   ) throws IOException {
     statistics.incrementReadOps(1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index f35c16b..85f7aa4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -28,6 +28,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
     LISTSTATUS(false, HttpURLConnection.HTTP_OK),
     GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
+    GETQUOTAUSAGE(false, HttpURLConnection.HTTP_OK),
     GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
 
     GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 2ea1357..6e39f5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
@@ -193,9 +194,16 @@ public class HttpFSFileSystem extends FileSystem
   public static final String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON = "directoryCount";
   public static final String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount";
   public static final String CONTENT_SUMMARY_LENGTH_JSON = "length";
-  public static final String CONTENT_SUMMARY_QUOTA_JSON = "quota";
-  public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
-  public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
+
+  public static final String QUOTA_USAGE_JSON = "QuotaUsage";
+  public static final String QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON =
+      "fileAndDirectoryCount";
+  public static final String QUOTA_USAGE_QUOTA_JSON = "quota";
+  public static final String QUOTA_USAGE_SPACE_CONSUMED_JSON = "spaceConsumed";
+  public static final String QUOTA_USAGE_SPACE_QUOTA_JSON = "spaceQuota";
+  public static final String QUOTA_USAGE_CONSUMED_JSON = "consumed";
+  public static final String QUOTA_USAGE_TYPE_QUOTA_JSON = "typeQuota";
+
 
   public static final String ACL_STATUS_JSON = "AclStatus";
   public static final String ACL_STICKY_BIT_JSON = "stickyBit";
@@ -223,8 +231,9 @@ public class HttpFSFileSystem extends FileSystem
   public static enum Operation {
     OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
-    GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
-    INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET), GETTRASHROOT(HTTP_GET),
+    GETQUOTAUSAGE(HTTP_GET), GETFILECHECKSUM(HTTP_GET),
+    GETFILEBLOCKLOCATIONS(HTTP_GET), INSTRUMENTATION(HTTP_GET),
+    GETACLSTATUS(HTTP_GET), GETTRASHROOT(HTTP_GET),
     APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
     CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
     SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
@@ -1122,14 +1131,65 @@ public class HttpFSFileSystem extends FileSystem
       getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) ((JSONObject)
-      HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
-    return new ContentSummary.Builder().
-        length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)).
-        fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)).
-        directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)).
-        quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)).
-        spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)).
-        spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build();
+        HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
+    ContentSummary.Builder builder = new ContentSummary.Builder()
+        .length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON))
+        .fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON))
+        .directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON));
+    builder = buildQuotaUsage(builder, json, ContentSummary.Builder.class);
+    return builder.build();
+  }
+
+  @Override
+  public QuotaUsage getQuotaUsage(Path f) throws IOException {
+    Map<String, String> params = new HashMap<>();
+    params.put(OP_PARAM, Operation.GETQUOTAUSAGE.toString());
+    HttpURLConnection conn =
+        getConnection(Operation.GETQUOTAUSAGE.getMethod(), params, f, true);
+    JSONObject json = (JSONObject) ((JSONObject)
+        HttpFSUtils.jsonParse(conn)).get(QUOTA_USAGE_JSON);
+    QuotaUsage.Builder builder = new QuotaUsage.Builder();
+    builder = buildQuotaUsage(builder, json, QuotaUsage.Builder.class);
+    return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided JSON object and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
+      T builder, JSONObject json, Class<T> type) {
+    long quota = (Long) json.get(QUOTA_USAGE_QUOTA_JSON);
+    long spaceConsumed = (Long) json.get(QUOTA_USAGE_SPACE_CONSUMED_JSON);
+    long spaceQuota = (Long) json.get(QUOTA_USAGE_SPACE_QUOTA_JSON);
+    JSONObject typeJson = (JSONObject) json.get(QUOTA_USAGE_TYPE_QUOTA_JSON);
+
+    builder = type.cast(builder
+        .quota(quota)
+        .spaceConsumed(spaceConsumed)
+        .spaceQuota(spaceQuota)
+    );
+
+    // ContentSummary doesn't set this, so check before using it.
+    if (json.get(QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON) != null) {
+      long fileAndDirectoryCount = (Long)
+          json.get(QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON);
+      builder = type.cast(builder.fileAndDirectoryCount(fileAndDirectoryCount));
+    }
+
+    if (typeJson != null) {
+      for (StorageType t : StorageType.getTypesSupportingQuota()) {
+        JSONObject typeQuota = (JSONObject) typeJson.get(t.toString());
+        if (typeQuota != null) {
+          builder = type.cast(builder
+              .typeQuota(t, ((Long) typeQuota.get(QUOTA_USAGE_QUOTA_JSON)))
+              .typeConsumed(t, ((Long) typeQuota.get(QUOTA_USAGE_CONSUMED_JSON))
+          ));
+        }
+      }
+    }
+
+    return builder;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 8fb45d0..d389d0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.GlobFilter;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -53,6 +55,7 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.TreeMap;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
@@ -240,18 +243,67 @@ public class FSOperations {
   @SuppressWarnings({"unchecked"})
   private static Map contentSummaryToJSON(ContentSummary contentSummary) {
     Map json = new LinkedHashMap();
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON, contentSummary.getDirectoryCount());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON, contentSummary.getFileCount());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON, contentSummary.getLength());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON, contentSummary.getQuota());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON, contentSummary.getSpaceConsumed());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON, contentSummary.getSpaceQuota());
+    json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON,
+        contentSummary.getDirectoryCount());
+    json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON,
+        contentSummary.getFileCount());
+    json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON,
+        contentSummary.getLength());
+    Map<String, Object> quotaUsageMap = quotaUsageToMap(contentSummary);
+    for (Map.Entry<String, Object> e : quotaUsageMap.entrySet()) {
+      // For ContentSummary we don't need this since we already have
+      // separate counts for files and directories.
+      if (!e.getKey().equals(
+          HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON)) {
+        json.put(e.getKey(), e.getValue());
+      }
+    }
     Map response = new LinkedHashMap();
     response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
     return response;
   }
 
   /**
+   * Converts a <code>QuotaUsage</code> object into a JSON
+   * object.
+   */
+  @SuppressWarnings({"unchecked"})
+  private static Map quotaUsageToJSON(QuotaUsage quotaUsage) {
+    Map response = new LinkedHashMap();
+    Map quotaUsageMap = quotaUsageToMap(quotaUsage);
+    response.put(HttpFSFileSystem.QUOTA_USAGE_JSON, quotaUsageMap);
+    return response;
+  }
+
+  private static Map<String, Object> quotaUsageToMap(QuotaUsage quotaUsage) {
+    Map<String, Object> result = new LinkedHashMap<>();
+    result.put(HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON,
+        quotaUsage.getFileAndDirectoryCount());
+    result.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON, quotaUsage.getQuota());
+    result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_CONSUMED_JSON,
+        quotaUsage.getSpaceConsumed());
+    result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_QUOTA_JSON,
+        quotaUsage.getSpaceQuota());
+    Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      long tQuota = quotaUsage.getTypeQuota(t);
+      if (tQuota != HdfsConstants.QUOTA_RESET) {
+        Map<String, Long> type = typeQuota.get(t.toString());
+        if (type == null) {
+          type = new TreeMap<>();
+          typeQuota.put(t.toString(), type);
+        }
+        type.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON,
+            quotaUsage.getTypeQuota(t));
+        type.put(HttpFSFileSystem.QUOTA_USAGE_CONSUMED_JSON,
+            quotaUsage.getTypeConsumed(t));
+      }
+    }
+    result.put(HttpFSFileSystem.QUOTA_USAGE_TYPE_QUOTA_JSON, typeQuota);
+    return result;
+  }
+
+  /**
    * Converts an object into a Json Map with one key-value entry.
    * <p/>
    * It assumes the given value is either a JSON primitive type or a
@@ -465,6 +517,26 @@ public class FSOperations {
   }
 
   /**
+   * Executor that performs a quota-usage FileSystemAccess file system
+   * operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSQuotaUsage
+      implements FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    public FSQuotaUsage(String path) {
+      this.path = new Path(path);
+    }
+
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      QuotaUsage quotaUsage = fs.getQuotaUsage(path);
+      return quotaUsageToJSON(quotaUsage);
+    }
+  }
+
+  /**
    * Executor that performs a create FileSystemAccess file system operation.
    */
   @InterfaceAudience.Private
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 5f265c0..e8e75fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -57,6 +57,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.LISTSTATUS, new Class[]{FilterParam.class});
     PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{});
     PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{});
+    PARAMS_DEF.put(Operation.GETQUOTAUSAGE, new Class[]{});
     PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{});
     PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
         new Class[] {OffsetParam.class, LenParam.class});
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index c0459c0..3d53cc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -288,6 +288,14 @@ public class HttpFSServer {
       response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETQUOTAUSAGE: {
+      FSOperations.FSQuotaUsage command =
+          new FSOperations.FSQuotaUsage(path);
+      Map json = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", path);
+      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     case GETFILECHECKSUM: {
       FSOperations.FSFileChecksum command =
           new FSOperations.FSFileChecksum(path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 89555c3..9c9ece8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -23,12 +23,16 @@ import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -629,17 +633,56 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     fs = getHttpFSFileSystem();
     ContentSummary httpContentSummary = fs.getContentSummary(path);
     fs.close();
-    assertEquals(httpContentSummary.getDirectoryCount(),
-        hdfsContentSummary.getDirectoryCount());
-    assertEquals(httpContentSummary.getFileCount(),
-        hdfsContentSummary.getFileCount());
-    assertEquals(httpContentSummary.getLength(),
-        hdfsContentSummary.getLength());
-    assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
-    assertEquals(httpContentSummary.getSpaceConsumed(),
-        hdfsContentSummary.getSpaceConsumed());
-    assertEquals(httpContentSummary.getSpaceQuota(),
-        hdfsContentSummary.getSpaceQuota());
+    assertEquals(hdfsContentSummary.getDirectoryCount(),
+        httpContentSummary.getDirectoryCount());
+    assertEquals(hdfsContentSummary.getFileCount(),
+        httpContentSummary.getFileCount());
+    assertEquals(hdfsContentSummary.getLength(),
+        httpContentSummary.getLength());
+    assertEquals(hdfsContentSummary.getQuota(), httpContentSummary.getQuota());
+    assertEquals(hdfsContentSummary.getSpaceConsumed(),
+        httpContentSummary.getSpaceConsumed());
+    assertEquals(hdfsContentSummary.getSpaceQuota(),
+        httpContentSummary.getSpaceQuota());
+  }
+
+  private void testQuotaUsage() throws Exception {
+    if (isLocalFS()) {
+      // LocalFS doesn't support setQuota, so skip here.
+      return;
+    }
+
+    DistributedFileSystem dfs =
+        (DistributedFileSystem) FileSystem.get(getProxiedFSConf());
+    Path path = new Path(getProxiedFSTestDir(), "foo");
+    dfs.mkdirs(path);
+    dfs.setQuota(path, 20, 600 * 1024 * 1024);
+    for (int i = 0; i < 10; i++) {
+      dfs.createNewFile(new Path(path, "test_file_" + i));
+    }
+    FSDataOutputStream out = dfs.create(new Path(path, "test_file"));
+    out.writeUTF("Hello World");
+    out.close();
+
+    dfs.setQuotaByStorageType(path, StorageType.SSD, 100000);
+    dfs.setQuotaByStorageType(path, StorageType.DISK, 200000);
+
+    QuotaUsage hdfsQuotaUsage = dfs.getQuotaUsage(path);
+    dfs.close();
+    FileSystem fs = getHttpFSFileSystem();
+    QuotaUsage httpQuotaUsage = fs.getQuotaUsage(path);
+    fs.close();
+    assertEquals(hdfsQuotaUsage.getFileAndDirectoryCount(),
+        httpQuotaUsage.getFileAndDirectoryCount());
+    assertEquals(hdfsQuotaUsage.getQuota(), httpQuotaUsage.getQuota());
+    assertEquals(hdfsQuotaUsage.getSpaceConsumed(),
+        httpQuotaUsage.getSpaceConsumed());
+    assertEquals(hdfsQuotaUsage.getSpaceQuota(),
+        httpQuotaUsage.getSpaceQuota());
+    assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.SSD),
+        httpQuotaUsage.getTypeQuota(StorageType.SSD));
+    assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.DISK),
+        httpQuotaUsage.getTypeQuota(StorageType.DISK));
   }
   
   /** Set xattr */
@@ -1018,10 +1061,10 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
   protected enum Operation {
     GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
     WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
-    SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
-    GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH,
-    GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, GETFILEBLOCKLOCATIONS,
-    CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT
+    SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, QUOTA_USAGE, FILEACLS, DIRACLS,
+    SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION,
+    LIST_STATUS_BATCH, GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
+    GETFILEBLOCKLOCATIONS, CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT
   }
 
   private void operation(Operation op) throws Exception {
@@ -1077,6 +1120,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     case CONTENT_SUMMARY:
       testContentSummary();
       break;
+    case QUOTA_USAGE:
+      testQuotaUsage();
+      break;
     case FILEACLS:
       testFileAcls();
       break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index f2c99a0..1ddcd3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -57,6 +57,7 @@ import javax.ws.rs.core.Response.Status;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
@@ -1061,6 +1062,12 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(contentsummary);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETQUOTAUSAGE:
+    {
+      final QuotaUsage quotaUsage = cp.getQuotaUsage(fullpath);
+      final String js = JsonUtil.toJsonString(quotaUsage);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case GETFILECHECKSUM:
     {
       final NameNode namenode = (NameNode)context.getAttribute("name.node");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index ae12914..585bf0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -17,7 +17,16 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -316,25 +325,44 @@ public class JsonUtil {
     m.put("length", contentsummary.getLength());
     m.put("fileCount", contentsummary.getFileCount());
     m.put("directoryCount", contentsummary.getDirectoryCount());
-    m.put("quota", contentsummary.getQuota());
-    m.put("spaceConsumed", contentsummary.getSpaceConsumed());
-    m.put("spaceQuota", contentsummary.getSpaceQuota());
-    final Map<String, Map<String, Long>> typeQuota =
-        new TreeMap<String, Map<String, Long>>();
+    // For ContentSummary we don't need this since we already have
+    // separate counts for files and directories.
+    m.putAll(toJsonMap(contentsummary, false));
+    return toJsonString(ContentSummary.class, m);
+  }
+
+  /** Convert a QuotaUsage to a JSON string. */
+  public static String toJsonString(final QuotaUsage quotaUsage) {
+    if (quotaUsage == null) {
+      return null;
+    }
+    return toJsonString(QuotaUsage.class, toJsonMap(quotaUsage, true));
+  }
+
+  private static Map<String, Object> toJsonMap(
+      final QuotaUsage quotaUsage, boolean includeFileAndDirectoryCount) {
+    final Map<String, Object> m = new TreeMap<>();
+    if (includeFileAndDirectoryCount) {
+      m.put("fileAndDirectoryCount", quotaUsage.getFileAndDirectoryCount());
+    }
+    m.put("quota", quotaUsage.getQuota());
+    m.put("spaceConsumed", quotaUsage.getSpaceConsumed());
+    m.put("spaceQuota", quotaUsage.getSpaceQuota());
+    final Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
     for (StorageType t : StorageType.getTypesSupportingQuota()) {
-      long tQuota = contentsummary.getTypeQuota(t);
+      long tQuota = quotaUsage.getTypeQuota(t);
       if (tQuota != HdfsConstants.QUOTA_RESET) {
         Map<String, Long> type = typeQuota.get(t.toString());
         if (type == null) {
-          type = new TreeMap<String, Long>();
+          type = new TreeMap<>();
           typeQuota.put(t.toString(), type);
         }
-        type.put("quota", contentsummary.getTypeQuota(t));
-        type.put("consumed", contentsummary.getTypeConsumed(t));
+        type.put("quota", quotaUsage.getTypeQuota(t));
+        type.put("consumed", quotaUsage.getTypeConsumed(t));
       }
     }
     m.put("typeQuota", typeQuota);
-    return toJsonString(ContentSummary.class, m);
+    return m;
   }
 
   /** Convert a MD5MD5CRC32FileChecksum to a Json string. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 20dd8b3..e7d77dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -39,6 +39,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`LISTSTATUS`](#List_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus)
     * [`LISTSTATUS_BATCH`](#Iteratively_List_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatusIterator)
     * [`GETCONTENTSUMMARY`](#Get_Content_Summary_of_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getContentSummary)
+    * [`GETQUOTAUSAGE`](#Get_Quota_Usage_of_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage)
     * [`GETFILECHECKSUM`](#Get_File_Checksum) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileChecksum)
     * [`GETHOMEDIRECTORY`](#Get_Home_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getHomeDirectory)
     * [`GETDELEGATIONTOKEN`](#Get_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationToken)
@@ -674,6 +675,48 @@ Other File System Operations
 
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getContentSummary
 
+### Get Quota Usage of a Directory
+
+* Submit a HTTP GET request.
+
+        curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETQUOTAUSAGE"
+
+    The client receives a response with a [`QuotaUsage` JSON object](#QuotaUsage_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {
+          "QuotaUsage":
+          {
+            "fileAndDirectoryCount": 1,
+            "quota"         : 100,
+            "spaceConsumed" : 24930,
+            "spaceQuota"    : 100000,
+            "typeQuota":
+            {
+              "ARCHIVE":
+              {
+                "consumed": 500,
+                "quota": 10000
+              },
+              "DISK":
+              {
+                "consumed": 500,
+                "quota": 10000
+              },
+              "SSD":
+              {
+                "consumed": 500,
+                "quota": 10000
+              }
+            }
+          }
+        }
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage
+
 ### Get File Checksum
 
 * Submit a HTTP GET request.
@@ -1663,6 +1706,114 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [`
 
 See also: [`GETCONTENTSUMMARY`](#Get_Content_Summary_of_a_Directory)
 
+### QuotaUsage JSON Schema
+
+```json
+{
+  "name"      : "QuotaUsage",
+  "properties":
+  {
+    "QuotaUsage":
+    {
+      "type"      : "object",
+      "properties":
+      {
+        "fileAndDirectoryCount":
+        {
+          "description": "The number of files and directories.",
+          "type"       : "integer",
+          "required"   : true
+        },
+        "quota":
+        {
+          "description": "The namespace quota of this directory.",
+          "type"       : "integer",
+          "required"   : true
+        },
+        "spaceConsumed":
+        {
+          "description": "The disk space consumed by the content.",
+          "type"       : "integer",
+          "required"   : true
+        },
+        "spaceQuota":
+        {
+          "description": "The disk space quota.",
+          "type"       : "integer",
+          "required"   : true
+        },
+        "typeQuota":
+        {
+          "type"      : "object",
+          "properties":
+          {
+            "ARCHIVE":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "DISK":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "SSD":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+```
+
+See also: [`GETQUOTAUSAGE`](#Get_Quota_Usage_of_a_Directory)
+
 ### FileChecksum JSON Schema
 
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index d5d83a1..3e25a16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -863,6 +864,47 @@ public class TestWebHDFS {
   }
 
   @Test
+  public void testQuotaUsage() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/TestDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final long nsQuota = 100;
+      final long spaceQuota = 600 * 1024 * 1024;
+      final long diskQuota = 100000;
+      final byte[] bytes = {0x0, 0x1, 0x2, 0x3};
+
+      dfs.mkdirs(path);
+      dfs.setQuota(path, nsQuota, spaceQuota);
+      for (int i = 0; i < 10; i++) {
+        dfs.createNewFile(new Path(path, "test_file_" + i));
+      }
+      FSDataOutputStream out = dfs.create(new Path(path, "test_file"));
+      out.write(bytes);
+      out.close();
+
+      dfs.setQuotaByStorageType(path, StorageType.DISK, diskQuota);
+
+      QuotaUsage quotaUsage = webHdfs.getQuotaUsage(path);
+      assertEquals(12, quotaUsage.getFileAndDirectoryCount());
+      assertEquals(nsQuota, quotaUsage.getQuota());
+      assertEquals(bytes.length * dfs.getDefaultReplication(), quotaUsage.getSpaceConsumed());
+      assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
+      assertEquals(diskQuota, quotaUsage.getTypeQuota(StorageType.DISK));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org