Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/04/18 00:35:44 UTC

[16/50] [abbrv] hadoop git commit: HDFS-7701. Support reporting per storage type quota and usage with hadoop/hdfs shell. (Contributed by Peter Shi)

HDFS-7701. Support reporting per storage type quota and usage with hadoop/hdfs shell. (Contributed by Peter Shi)
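
The new -t option extends the count command's usage to
"-count [-q] [-h] [-v] [-t [<storage type>]] <path> ...", so an invocation
along the lines of

  hadoop fs -count -q -t SSD,DISK /user/foo

(path illustrative) reports the quota and remaining quota for the listed
storage types of each path. Giving -t with no type list, or with "all",
reports every storage type that supports quota.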


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40b72486
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40b72486
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40b72486

Branch: refs/heads/YARN-2928
Commit: 40b72486acc967e8650b4949f7ea4b1d0c2f22e6
Parents: ae7a5ff
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Apr 13 21:01:15 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Fri Apr 17 15:29:42 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/ContentSummary.java    |  89 ++++++++++--
 .../apache/hadoop/fs/shell/CommandFormat.java   |  49 ++++++-
 .../java/org/apache/hadoop/fs/shell/Count.java  |  73 +++++++++-
 .../org/apache/hadoop/fs/shell/TestCount.java   | 142 ++++++++++++++++++-
 .../src/test/resources/testConf.xml             |   2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 6 files changed, 334 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40b72486/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 66137d0..ccd6960 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.fs;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.List;
 
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
@@ -255,6 +255,8 @@ public class ContentSummary implements Writable{
   private static final String QUOTA_SUMMARY_FORMAT = "%12s %15s ";
   private static final String SPACE_QUOTA_SUMMARY_FORMAT = "%15s %15s ";
 
+  private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%13s %17s ";
+
   private static final String[] HEADER_FIELDS = new String[] { "DIR_COUNT",
       "FILE_COUNT", "CONTENT_SIZE"};
   private static final String[] QUOTA_HEADER_FIELDS = new String[] { "QUOTA",
@@ -268,7 +270,11 @@ public class ContentSummary implements Writable{
       QUOTA_SUMMARY_FORMAT + SPACE_QUOTA_SUMMARY_FORMAT,
       (Object[]) QUOTA_HEADER_FIELDS) +
       HEADER;
-  
+
+  /** default quota display string */
+  private static final String QUOTA_NONE = "none";
+  private static final String QUOTA_INF = "inf";
+
   /** Return the header of the output.
    * if qOption is false, output directory count, file count, and content size;
    * if qOption is true, output quota and remaining quota as well.
@@ -281,6 +287,26 @@ public class ContentSummary implements Writable{
   }
 
   /**
+   * Return the header with the given storage types.
+   *
+   * @param storageTypes storage types to include in the header
+   * @return storage header string
+   */
+  public static String getStorageTypeHeader(List<StorageType> storageTypes) {
+    StringBuffer header = new StringBuffer();
+
+    for (StorageType st : storageTypes) {
+      /* The field widths are 13 and 17 for the quota and remaining quota
+       * columns, since the longest quota header is ARCHIVE_QUOTA and the
+       * longest remaining-quota header is REM_ARCHIVE_QUOTA. */
+      String storageName = st.toString();
+      header.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT, storageName + "_QUOTA",
+          "REM_" + storageName + "_QUOTA"));
+    }
+    return header.toString();
+  }
+
+  /**
    * Returns the names of the fields from the summary header.
    * 
    * @return names of fields as displayed in the header
@@ -325,13 +351,49 @@ public class ContentSummary implements Writable{
    * @return the string representation of the object
    */
   public String toString(boolean qOption, boolean hOption) {
+    return toString(qOption, hOption, false, null);
+  }
+
+  /**
+   * Return the string representation of the object in the output format.
+   * If tOption is true, the quota is displayed by storage type;
+   * otherwise the behavior is the same as #toString(boolean,boolean).
+   *
+   * @param qOption a flag indicating if quota needs to be printed or not
+   * @param hOption a flag indicating if human readable output is to be used
+   * @param tOption a flag indicating if quota by storage type is to be displayed
+   * @param types Storage types to display
+   * @return the string representation of the object
+   */
+  public String toString(boolean qOption, boolean hOption,
+                         boolean tOption, List<StorageType> types) {
     String prefix = "";
+
+    if (tOption) {
+      StringBuffer content = new StringBuffer();
+      for (StorageType st : types) {
+        long typeQuota = getTypeQuota(st);
+        long typeConsumed = getTypeConsumed(st);
+        String quotaStr = QUOTA_NONE;
+        String quotaRem = QUOTA_INF;
+
+        if (typeQuota > 0) {
+          quotaStr = formatSize(typeQuota, hOption);
+          quotaRem = formatSize(typeQuota - typeConsumed, hOption);
+        }
+
+        content.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
+            quotaStr, quotaRem));
+      }
+      return content.toString();
+    }
+
     if (qOption) {
-      String quotaStr = "none";
-      String quotaRem = "inf";
-      String spaceQuotaStr = "none";
-      String spaceQuotaRem = "inf";
-      
+      String quotaStr = QUOTA_NONE;
+      String quotaRem = QUOTA_INF;
+      String spaceQuotaStr = QUOTA_NONE;
+      String spaceQuotaRem = QUOTA_INF;
+
       if (quota>0) {
         quotaStr = formatSize(quota, hOption);
         quotaRem = formatSize(quota-(directoryCount+fileCount), hOption);
@@ -340,16 +402,17 @@ public class ContentSummary implements Writable{
         spaceQuotaStr = formatSize(spaceQuota, hOption);
         spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
       }
-      
+
       prefix = String.format(QUOTA_SUMMARY_FORMAT + SPACE_QUOTA_SUMMARY_FORMAT,
-                             quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
+          quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
     }
-    
+
     return prefix + String.format(SUMMARY_FORMAT,
-     formatSize(directoryCount, hOption),
-     formatSize(fileCount, hOption),
-     formatSize(length, hOption));
+        formatSize(directoryCount, hOption),
+        formatSize(fileCount, hOption),
+        formatSize(length, hOption));
   }
+
   /**
    * Formats a size to be human readable or in bytes
    * @param size value to be formatted

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40b72486/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
index e1aeea9..0f9aa38 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
@@ -31,6 +31,7 @@ import java.util.Set;
 public class CommandFormat {
   final int minPar, maxPar;
   final Map<String, Boolean> options = new HashMap<String, Boolean>();
+  final Map<String, String> optionsWithValue = new HashMap<String, String>();
   boolean ignoreUnknownOpts = false;
   
   /**
@@ -64,6 +65,18 @@ public class CommandFormat {
     }
   }
 
+  /**
+   * Add an option that takes a value.
+   *
+   * @param option option name
+   */
+  public void addOptionWithValue(String option) {
+    if (options.containsKey(option)) {
+      throw new DuplicatedOptionException(option);
+    }
+    optionsWithValue.put(option, null);
+  }
+
   /** Parse parameters starting from the given position
    * Consider using the variant that directly takes a List
    * 
@@ -99,6 +112,17 @@ public class CommandFormat {
       if (options.containsKey(opt)) {
         args.remove(pos);
         options.put(opt, Boolean.TRUE);
+      } else if (optionsWithValue.containsKey(opt)) {
+        args.remove(pos);
+        if (pos < args.size() && (args.size() > minPar)) {
+          arg = args.get(pos);
+          args.remove(pos);
+        } else {
+          arg = "";
+        }
+        if (!arg.startsWith("-") || arg.equals("-")) {
+          optionsWithValue.put(opt, arg);
+        }
       } else if (ignoreUnknownOpts) {
         pos++;
       } else {
@@ -122,7 +146,19 @@ public class CommandFormat {
   public boolean getOpt(String option) {
     return options.containsKey(option) ? options.get(option) : false;
   }
-  
+
+  /**
+   * Get the option's value.
+   *
+   * @param option option name
+   * @return the option's value;
+   * "" if the option exists but no value was assigned,
+   * null if the option does not exist
+   */
+  public String getOptValue(String option) {
+    return optionsWithValue.get(option);
+  }
+
   /** Returns all the options that are set
    * 
    * @return Set<String> of the enabled options
@@ -203,4 +239,15 @@ public class CommandFormat {
       return option;
     }
   }
+
+  /**
+   * Used when a duplicated option is supplied to a command.
+   */
+  public static class DuplicatedOptionException extends IllegalArgumentException {
+    private static final long serialVersionUID = 0L;
+
+    public DuplicatedOptionException(String duplicatedOption) {
+      super("option " + duplicatedOption + " already exsits!");
+    }
+  }
 }
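
A minimal sketch of the new valued-option support, mirroring how Count wires
up -t below (the option names and arguments here are illustrative):

import java.util.LinkedList;
import org.apache.hadoop.fs.shell.CommandFormat;

public class CommandFormatDemo {
  public static void main(String[] argv) {
    LinkedList<String> args = new LinkedList<String>();
    args.add("-q");
    args.add("-t");
    args.add("SSD,DISK");
    args.add("/user/foo");                    // path argument, left in args

    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "q", "h", "v");
    cf.addOptionWithValue("t");               // "-t" consumes the token after it
    cf.parse(args);                           // args now holds only "/user/foo"

    System.out.println(cf.getOpt("q"));       // true
    System.out.println(cf.getOptValue("t"));  // "SSD,DISK"; "" if no value given,
                                              // null if -t was not supplied
  }
}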

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40b72486/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
index dd7d168..c615876 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.LinkedList;
+import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -27,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.StorageType;
 
 /**
  * Count the number of directories, files, bytes, quota, and remaining quota.
@@ -46,11 +49,12 @@ public class Count extends FsCommand {
   private static final String OPTION_QUOTA = "q";
   private static final String OPTION_HUMAN = "h";
   private static final String OPTION_HEADER = "v";
+  private static final String OPTION_TYPE = "t";
 
   public static final String NAME = "count";
   public static final String USAGE =
       "[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] [-" + OPTION_HEADER
-          + "] <path> ...";
+          + "] [-" + OPTION_TYPE + " [<storage type>]] <path> ...";
   public static final String DESCRIPTION =
       "Count the number of directories, files and bytes under the paths\n" +
           "that match the specified file pattern.  The output columns are:\n" +
@@ -63,10 +67,19 @@ public class Count extends FsCommand {
           " PATHNAME\n" +
           "The -" + OPTION_HUMAN +
           " option shows file sizes in human readable format.\n" +
-          "The -" + OPTION_HEADER + " option displays a header line.";
+          "The -" + OPTION_HEADER + " option displays a header line.\n" +
+          "The -" + OPTION_TYPE + " option displays quota by storage types.\n" +
+          "It must be used with -" + OPTION_QUOTA + " option.\n" +
+          "If a comma-separated list of storage types is given after the -" +
+          OPTION_TYPE + " option, \n" +
+          "it displays the quota and usage for the specified types. \n" +
+          "Otherwise, it displays the quota and usage for all the storage \n" +
+          "types that support quota";
 
   private boolean showQuotas;
   private boolean humanReadable;
+  private boolean showQuotabyType;
+  private List<StorageType> storageTypes = null;
 
   /** Constructor */
   public Count() {}
@@ -87,21 +100,54 @@ public class Count extends FsCommand {
   protected void processOptions(LinkedList<String> args) {
     CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
         OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER);
+    cf.addOptionWithValue(OPTION_TYPE);
     cf.parse(args);
     if (args.isEmpty()) { // default path is the current working directory
       args.add(".");
     }
     showQuotas = cf.getOpt(OPTION_QUOTA);
     humanReadable = cf.getOpt(OPTION_HUMAN);
+
+    if (showQuotas) {
+      String types = cf.getOptValue(OPTION_TYPE);
+
+      if (null != types) {
+        showQuotabyType = true;
+        storageTypes = getAndCheckStorageTypes(types);
+      } else {
+        showQuotabyType = false;
+      }
+    }
+
     if (cf.getOpt(OPTION_HEADER)) {
-      out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");
+      if (showQuotabyType) {
+        out.println(ContentSummary.getStorageTypeHeader(storageTypes) + "PATHNAME");
+      } else {
+        out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");
+      }
     }
   }
 
+  private List<StorageType> getAndCheckStorageTypes(String types) {
+    if ("".equals(types) || "all".equalsIgnoreCase(types)) {
+      return StorageType.getTypesSupportingQuota();
+    }
+
+    String[] typeArray = StringUtils.split(types, ',');
+    List<StorageType> stTypes = new ArrayList<>();
+
+    for (String t : typeArray) {
+      stTypes.add(StorageType.parseStorageType(t));
+    }
+
+    return stTypes;
+  }
+
   @Override
   protected void processPath(PathData src) throws IOException {
     ContentSummary summary = src.fs.getContentSummary(src.path);
-    out.println(summary.toString(showQuotas, isHumanReadable()) + src);
+    out.println(summary.toString(showQuotas, isHumanReadable(),
+        showQuotabyType, storageTypes) + src);
   }
   
   /**
@@ -121,4 +167,23 @@ public class Count extends FsCommand {
   boolean isHumanReadable() {
     return humanReadable;
   }
+
+  /**
+   * Should quota be printed by storage type?
+   * @return true if quota by storage types is enabled
+   */
+  @InterfaceAudience.Private
+  boolean isShowQuotabyType() {
+    return showQuotabyType;
+  }
+
+  /**
+   * Storage types specified with the -t option.
+   * @return the specified storage types
+   */
+  @InterfaceAudience.Private
+  List<StorageType> getStorageTypes() {
+    return storageTypes;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40b72486/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index d5f097d..22d9a21 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -24,13 +24,15 @@ import java.io.PrintStream;
 import java.io.IOException;
 import java.net.URI;
 import java.util.LinkedList;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.shell.CommandFormat.NotEnoughArgumentsException;
 import org.junit.Test;
 import org.junit.Before;
@@ -79,11 +81,17 @@ public class TestCount {
     LinkedList<String> options = new LinkedList<String>();
     options.add("-q");
     options.add("-h");
+    options.add("-t");
+    options.add("SSD");
     options.add("dummy");
     Count count = new Count();
     count.processOptions(options);
     assertTrue(count.isShowQuotas());
     assertTrue(count.isHumanReadable());
+    assertTrue(count.isShowQuotabyType());
+    assertEquals(1, count.getStorageTypes().size());
+    assertEquals(StorageType.SSD, count.getStorageTypes().get(0));
+
   }
 
   // check no options is handled correctly
@@ -254,6 +262,112 @@ public class TestCount {
   }
 
   @Test
+  public void processPathWithQuotasByStorageTypesHeader() throws Exception {
+    Path path = new Path("mockfs:/test");
+
+    when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
+
+    PrintStream out = mock(PrintStream.class);
+
+    Count count = new Count();
+    count.out = out;
+
+    LinkedList<String> options = new LinkedList<String>();
+    options.add("-q");
+    options.add("-v");
+    options.add("-t");
+    options.add("all");
+    options.add("dummy");
+    count.processOptions(options);
+    String withStorageTypeHeader =
+        // <----13---> <-------17------> <----13-----> <------17------->
+        "   DISK_QUOTA    REM_DISK_QUOTA     SSD_QUOTA     REM_SSD_QUOTA " +
+        // <----13---> <-------17------>
+        "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
+        "PATHNAME";
+    verify(out).println(withStorageTypeHeader);
+    verifyNoMoreInteractions(out);
+  }
+
+  @Test
+  public void processPathWithQuotasBySSDStorageTypesHeader() throws Exception {
+    Path path = new Path("mockfs:/test");
+
+    when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
+
+    PrintStream out = mock(PrintStream.class);
+
+    Count count = new Count();
+    count.out = out;
+
+    LinkedList<String> options = new LinkedList<String>();
+    options.add("-q");
+    options.add("-v");
+    options.add("-t");
+    options.add("SSD");
+    options.add("dummy");
+    count.processOptions(options);
+    String withStorageTypeHeader =
+        // <----13---> <-------17------>
+        "    SSD_QUOTA     REM_SSD_QUOTA " +
+        "PATHNAME";
+    verify(out).println(withStorageTypeHeader);
+    verifyNoMoreInteractions(out);
+  }
+
+  @Test
+  public void processPathWithQuotasByMultipleStorageTypesContent() throws Exception {
+    Path path = new Path("mockfs:/test");
+
+    when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
+    PathData pathData = new PathData(path.toString(), conf);
+
+    PrintStream out = mock(PrintStream.class);
+
+    Count count = new Count();
+    count.out = out;
+
+    LinkedList<String> options = new LinkedList<String>();
+    options.add("-q");
+    options.add("-t");
+    options.add("SSD,DISK");
+    options.add("dummy");
+    count.processOptions(options);
+    count.processPath(pathData);
+    String withStorageType = BYTES + StorageType.SSD.toString()
+        + " " + StorageType.DISK.toString() + " " + pathData.toString();
+    verify(out).println(withStorageType);
+    verifyNoMoreInteractions(out);
+  }
+
+  @Test
+  public void processPathWithQuotasByMultipleStorageTypes() throws Exception {
+    Path path = new Path("mockfs:/test");
+
+    when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
+
+    PrintStream out = mock(PrintStream.class);
+
+    Count count = new Count();
+    count.out = out;
+
+    LinkedList<String> options = new LinkedList<String>();
+    options.add("-q");
+    options.add("-v");
+    options.add("-t");
+    options.add("SSD,DISK");
+    options.add("dummy");
+    count.processOptions(options);
+    String withStorageTypeHeader =
+        // <----13---> <------17------->
+        "    SSD_QUOTA     REM_SSD_QUOTA " +
+        "   DISK_QUOTA    REM_DISK_QUOTA " +
+        "PATHNAME";
+    verify(out).println(withStorageTypeHeader);
+    verifyNoMoreInteractions(out);
+  }
+
+  @Test
   public void getCommandName() {
     Count count = new Count();
     String actual = count.getCommandName();
@@ -289,7 +403,7 @@ public class TestCount {
   public void getUsage() {
     Count count = new Count();
     String actual = count.getUsage();
-    String expected = "-count [-q] [-h] [-v] <path> ...";
+    String expected = "-count [-q] [-h] [-v] [-t [<storage type>]] <path> ...";
     assertEquals("Count.getUsage", expected, actual);
   }
 
@@ -306,7 +420,13 @@ public class TestCount {
         + "QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA\n"
         + "      DIR_COUNT FILE_COUNT CONTENT_SIZE PATHNAME\n"
         + "The -h option shows file sizes in human readable format.\n"
-        + "The -v option displays a header line.";
+        + "The -v option displays a header line.\n"
+        + "The -t option displays quota by storage types.\n"
+        + "It must be used with -q option.\n"
+        + "If a comma-separated list of storage types is given after the -t option, \n"
+        + "it displays the quota and usage for the specified types. \n"
+        + "Otherwise, it displays the quota and usage for all the storage \n"
+        + "types that support quota";
 
     assertEquals("Count.getDescription", expected, actual);
   }
@@ -321,7 +441,19 @@ public class TestCount {
     }
 
     @Override
-    public String toString(boolean qOption, boolean hOption) {
+    public String toString(boolean qOption, boolean hOption,
+                           boolean tOption, List<StorageType> types) {
+      if (tOption) {
+        StringBuffer result = new StringBuffer();
+        result.append(hOption ? HUMAN : BYTES);
+
+        for (StorageType type : types) {
+          result.append(type.toString());
+          result.append(" ");
+        }
+        return result.toString();
+      }
+
       if (qOption) {
         if (hOption) {
           return (HUMAN + WITH_QUOTAS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40b72486/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index ac28192..9b72960 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -262,7 +262,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-count \[-q\] \[-h\] \[-v\] &lt;path&gt; \.\.\. :( )*</expected-output>
+          <expected-output>^-count \[-q\] \[-h\] \[-v\] \[-t \[&lt;storage type&gt;\]\] &lt;path&gt; \.\.\. :( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40b72486/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1aaf42c..7414d33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -491,6 +491,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8111. NPE thrown when invalid FSImage filename given for
     'hdfs oiv_legacy' cmd ( surendra singh lilhore via vinayakumarb )
 
+    HDFS-7701. Support reporting per storage type quota and usage
+    with hadoop/hdfs shell. (Peter Shi via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES