Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/11/21 21:06:11 UTC

svn commit: r1544306 [2/3] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/m...

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java Thu Nov 21 20:06:09 2013
@@ -47,7 +47,7 @@ public enum Content {
     }
 
     private Counts() {
-      super(Content.values());
+      super(Content.class);
     }
   }
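
A minimal sketch of why the Counts constructor now takes the enum Class rather than a values() array: an EnumMap-backed counter only needs the Class token, so each instance avoids cloning a fresh Content.values() array. The SimpleEnumCounters class below is illustrative only; Hadoop's actual EnumCounters base class differs.

    import java.util.EnumMap;

    /** Illustrative enum-keyed counter; not Hadoop's EnumCounters. */
    class SimpleEnumCounters<E extends Enum<E>> {
      private final EnumMap<E, Long> counts;

      /** Taking Class<E> avoids an E[] values() allocation per instance. */
      SimpleEnumCounters(Class<E> enumClass) {
        this.counts = new EnumMap<E, Long>(enumClass);
      }

      long get(E key) {
        final Long v = counts.get(key);
        return v == null ? 0L : v;
      }

      void add(E key, long delta) {
        counts.put(key, get(key) + delta);
      }
    }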
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Nov 21 20:06:09 2013
@@ -2407,8 +2407,9 @@ public class FSDirectory implements Clos
     if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
       throw new IllegalArgumentException("Cannot clear namespace quota on root.");
     } else { // a directory inode
-      long oldNsQuota = dirNode.getNsQuota();
-      long oldDsQuota = dirNode.getDsQuota();
+      final Quota.Counts oldQuota = dirNode.getQuotaCounts();
+      final long oldNsQuota = oldQuota.get(Quota.NAMESPACE);
+      final long oldDsQuota = oldQuota.get(Quota.DISKSPACE);
       if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
         nsQuota = oldNsQuota;
       }
@@ -2460,8 +2461,9 @@ public class FSDirectory implements Clos
     try {
       INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota);
       if (dir != null) {
-        fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(), 
-                                         dir.getDsQuota());
+        final Quota.Counts q = dir.getQuotaCounts();
+        fsImage.getEditLog().logSetQuota(src,
+            q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
       }
     } finally {
       writeUnlock();
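
For reference, a sketch of the sentinel handling in the first hunk above. The helper is hypothetical; HdfsConstants.QUOTA_DONT_SET and HdfsConstants.QUOTA_RESET are the existing sentinels that setQuota already checks.

    // Hypothetical helper restating the sentinel semantics above:
    // QUOTA_DONT_SET means "leave this quota unchanged";
    // QUOTA_RESET means "clear the quota".
    private static long resolveQuota(long requested, long oldQuota) {
      return requested == HdfsConstants.QUOTA_DONT_SET ? oldQuota : requested;
    }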

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu Nov 21 20:06:09 2013
@@ -38,15 +38,15 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
@@ -63,7 +63,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -954,27 +954,27 @@ public class FSEditLog implements LogsPu
     logEdit(op);
   }
   
-  void logAddPathBasedCacheDirective(PathBasedCacheDirective directive,
+  void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
       boolean toLogRpcIds) {
-    AddPathBasedCacheDirectiveOp op =
-        AddPathBasedCacheDirectiveOp.getInstance(cache.get())
+    AddCacheDirectiveInfoOp op =
+        AddCacheDirectiveInfoOp.getInstance(cache.get())
             .setDirective(directive);
     logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }
 
-  void logModifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive, boolean toLogRpcIds) {
-    ModifyPathBasedCacheDirectiveOp op =
-        ModifyPathBasedCacheDirectiveOp.getInstance(
+  void logModifyCacheDirectiveInfo(
+      CacheDirectiveInfo directive, boolean toLogRpcIds) {
+    ModifyCacheDirectiveInfoOp op =
+        ModifyCacheDirectiveInfoOp.getInstance(
             cache.get()).setDirective(directive);
     logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }
 
-  void logRemovePathBasedCacheDirective(Long id, boolean toLogRpcIds) {
-    RemovePathBasedCacheDirectiveOp op =
-        RemovePathBasedCacheDirectiveOp.getInstance(cache.get()).setId(id);
+  void logRemoveCacheDirectiveInfo(Long id, boolean toLogRpcIds) {
+    RemoveCacheDirectiveInfoOp op =
+        RemoveCacheDirectiveInfoOp.getInstance(cache.get()).setId(id);
     logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Thu Nov 21 20:06:09 2013
@@ -36,13 +36,13 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
@@ -56,10 +56,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -639,8 +639,8 @@ public class FSEditLogLoader {
       break;
     }
     case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: {
-      AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op;
-      PathBasedCacheDirective result = fsNamesys.
+      AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
+      CacheDirectiveInfo result = fsNamesys.
           getCacheManager().addDirective(addOp.directive, null);
       if (toAddRetryCache) {
         Long id = result.getId();
@@ -649,8 +649,8 @@ public class FSEditLogLoader {
       break;
     }
     case OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE: {
-      ModifyPathBasedCacheDirectiveOp modifyOp =
-          (ModifyPathBasedCacheDirectiveOp) op;
+      ModifyCacheDirectiveInfoOp modifyOp =
+          (ModifyCacheDirectiveInfoOp) op;
       fsNamesys.getCacheManager().modifyDirective(
           modifyOp.directive, null);
       if (toAddRetryCache) {
@@ -659,8 +659,8 @@ public class FSEditLogLoader {
       break;
     }
     case OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE: {
-      RemovePathBasedCacheDirectiveOp removeOp =
-          (RemovePathBasedCacheDirectiveOp) op;
+      RemoveCacheDirectiveInfoOp removeOp =
+          (RemoveCacheDirectiveInfoOp) op;
       fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Thu Nov 21 20:06:09 2013
@@ -86,7 +86,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@@ -166,11 +166,11 @@ public abstract class FSEditLogOp {
       inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op());
       inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
       inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE,
-          new AddPathBasedCacheDirectiveOp());
+          new AddCacheDirectiveInfoOp());
       inst.put(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE,
-          new ModifyPathBasedCacheDirectiveOp());
+          new ModifyCacheDirectiveInfoOp());
       inst.put(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE,
-          new RemovePathBasedCacheDirectiveOp());
+          new RemoveCacheDirectiveInfoOp());
       inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
       inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
       inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp());
@@ -2868,22 +2868,22 @@ public abstract class FSEditLogOp {
 
   /**
    * {@literal @AtMostOnce} for
-   * {@link ClientProtocol#addPathBasedCacheDirective}
+   * {@link ClientProtocol#addCacheDirective}
    */
-  static class AddPathBasedCacheDirectiveOp extends FSEditLogOp {
-    PathBasedCacheDirective directive;
+  static class AddCacheDirectiveInfoOp extends FSEditLogOp {
+    CacheDirectiveInfo directive;
 
-    public AddPathBasedCacheDirectiveOp() {
+    public AddCacheDirectiveInfoOp() {
       super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
     }
 
-    static AddPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
-      return (AddPathBasedCacheDirectiveOp) cache
+    static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
+      return (AddCacheDirectiveInfoOp) cache
           .get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
     }
 
-    public AddPathBasedCacheDirectiveOp setDirective(
-        PathBasedCacheDirective directive) {
+    public AddCacheDirectiveInfoOp setDirective(
+        CacheDirectiveInfo directive) {
       this.directive = directive;
       assert(directive.getId() != null);
       assert(directive.getPath() != null);
@@ -2898,7 +2898,7 @@ public abstract class FSEditLogOp {
       String path = FSImageSerialization.readString(in);
       short replication = FSImageSerialization.readShort(in);
       String pool = FSImageSerialization.readString(in);
-      directive = new PathBasedCacheDirective.Builder().
+      directive = new CacheDirectiveInfo.Builder().
           setId(id).
           setPath(new Path(path)).
           setReplication(replication).
@@ -2930,7 +2930,7 @@ public abstract class FSEditLogOp {
 
     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
-      directive = new PathBasedCacheDirective.Builder().
+      directive = new CacheDirectiveInfo.Builder().
           setId(Long.parseLong(st.getValue("ID"))).
           setPath(new Path(st.getValue("PATH"))).
           setReplication(Short.parseShort(st.getValue("REPLICATION"))).
@@ -2942,7 +2942,7 @@ public abstract class FSEditLogOp {
     @Override
     public String toString() {
       StringBuilder builder = new StringBuilder();
-      builder.append("AddPathBasedCacheDirective [");
+      builder.append("AddCacheDirectiveInfo [");
       builder.append("id=" + directive.getId() + ",");
       builder.append("path=" + directive.getPath().toUri().getPath() + ",");
       builder.append("replication=" + directive.getReplication() + ",");
@@ -2955,22 +2955,22 @@ public abstract class FSEditLogOp {
 
   /**
    * {@literal @AtMostOnce} for
-   * {@link ClientProtocol#modifyPathBasedCacheDirective}
+   * {@link ClientProtocol#modifyCacheDirective}
    */
-  static class ModifyPathBasedCacheDirectiveOp extends FSEditLogOp {
-    PathBasedCacheDirective directive;
+  static class ModifyCacheDirectiveInfoOp extends FSEditLogOp {
+    CacheDirectiveInfo directive;
 
-    public ModifyPathBasedCacheDirectiveOp() {
+    public ModifyCacheDirectiveInfoOp() {
       super(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
     }
 
-    static ModifyPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
-      return (ModifyPathBasedCacheDirectiveOp) cache
+    static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
+      return (ModifyCacheDirectiveInfoOp) cache
           .get(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
     }
 
-    public ModifyPathBasedCacheDirectiveOp setDirective(
-        PathBasedCacheDirective directive) {
+    public ModifyCacheDirectiveInfoOp setDirective(
+        CacheDirectiveInfo directive) {
       this.directive = directive;
       assert(directive.getId() != null);
       return this;
@@ -2978,8 +2978,8 @@ public abstract class FSEditLogOp {
 
     @Override
     void readFields(DataInputStream in, int logVersion) throws IOException {
-      PathBasedCacheDirective.Builder builder =
-          new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+          new CacheDirectiveInfo.Builder();
       builder.setId(FSImageSerialization.readLong(in));
       byte flags = in.readByte();
       if ((flags & 0x1) != 0) {
@@ -2993,7 +2993,7 @@ public abstract class FSEditLogOp {
       }
       if ((flags & ~0x7) != 0) {
         throw new IOException("unknown flags set in " +
-            "ModifyPathBasedCacheDirectiveOp: " + flags);
+            "ModifyCacheDirectiveInfoOp: " + flags);
       }
       this.directive = builder.build();
       readRpcIds(in, logVersion);
@@ -3041,8 +3041,8 @@ public abstract class FSEditLogOp {
 
     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
-      PathBasedCacheDirective.Builder builder =
-          new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+          new CacheDirectiveInfo.Builder();
       builder.setId(Long.parseLong(st.getValue("ID")));
       String path = st.getValueOrNull("PATH");
       if (path != null) {
@@ -3063,7 +3063,7 @@ public abstract class FSEditLogOp {
     @Override
     public String toString() {
       StringBuilder builder = new StringBuilder();
-      builder.append("ModifyPathBasedCacheDirectiveOp[");
+      builder.append("ModifyCacheDirectiveInfoOp[");
       builder.append("id=").append(directive.getId());
       if (directive.getPath() != null) {
         builder.append(",").append("path=").append(directive.getPath());
@@ -3083,21 +3083,21 @@ public abstract class FSEditLogOp {
 
   /**
    * {@literal @AtMostOnce} for
-   * {@link ClientProtocol#removePathBasedCacheDirective}
+   * {@link ClientProtocol#removeCacheDirective}
    */
-  static class RemovePathBasedCacheDirectiveOp extends FSEditLogOp {
+  static class RemoveCacheDirectiveInfoOp extends FSEditLogOp {
     long id;
 
-    public RemovePathBasedCacheDirectiveOp() {
+    public RemoveCacheDirectiveInfoOp() {
       super(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
     }
 
-    static RemovePathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
-      return (RemovePathBasedCacheDirectiveOp) cache
+    static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
+      return (RemoveCacheDirectiveInfoOp) cache
           .get(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
     }
 
-    public RemovePathBasedCacheDirectiveOp setId(long id) {
+    public RemoveCacheDirectiveInfoOp setId(long id) {
       this.id = id;
       return this;
     }
@@ -3129,7 +3129,7 @@ public abstract class FSEditLogOp {
     @Override
     public String toString() {
       StringBuilder builder = new StringBuilder();
-      builder.append("RemovePathBasedCacheDirective [");
+      builder.append("RemoveCacheDirectiveInfo [");
       builder.append("id=" + Long.toString(id));
       appendRpcIdsToString(builder, rpcClientId, rpcCallId);
       builder.append("]");

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Thu Nov 21 20:06:09 2013
@@ -777,18 +777,22 @@ public class FSImage implements Closeabl
       
     if (dir.isQuotaSet()) {
       // check if quota is violated. It indicates a software bug.
+      final Quota.Counts q = dir.getQuotaCounts();
+
       final long namespace = counts.get(Quota.NAMESPACE) - parentNamespace;
-      if (Quota.isViolated(dir.getNsQuota(), namespace)) {
+      final long nsQuota = q.get(Quota.NAMESPACE);
+      if (Quota.isViolated(nsQuota, namespace)) {
         LOG.error("BUG: Namespace quota violation in image for "
             + dir.getFullPathName()
-            + " quota = " + dir.getNsQuota() + " < consumed = " + namespace);
+            + " quota = " + nsQuota + " < consumed = " + namespace);
       }
 
       final long diskspace = counts.get(Quota.DISKSPACE) - parentDiskspace;
-      if (Quota.isViolated(dir.getDsQuota(), diskspace)) {
+      final long dsQuota = q.get(Quota.DISKSPACE);
+      if (Quota.isViolated(dsQuota, diskspace)) {
         LOG.error("BUG: Diskspace quota violation in image for "
             + dir.getFullPathName()
-            + " quota = " + dir.getDsQuota() + " < consumed = " + diskspace);
+            + " quota = " + dsQuota + " < consumed = " + diskspace);
       }
 
       ((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace);
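
Quota.isViolated, called twice above, treats a negative quota as unset. The body below is an assumption about that helper's contract, not code from this diff:

    // Assumed contract of Quota.isViolated (not shown in this diff):
    // a negative quota means "no quota set", so it is never violated.
    static boolean isViolated(final long quota, final long usage) {
      return quota >= 0 && usage > quota;
    }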

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Thu Nov 21 20:06:09 2013
@@ -371,8 +371,9 @@ public class FSImageFormat {
 
   /** Update the root node's attributes */
   private void updateRootAttr(INodeWithAdditionalFields root) {                                                           
-    long nsQuota = root.getNsQuota();
-    long dsQuota = root.getDsQuota();
+    final Quota.Counts q = root.getQuotaCounts();
+    final long nsQuota = q.get(Quota.NAMESPACE);
+    final long dsQuota = q.get(Quota.DISKSPACE);
     FSDirectory fsDir = namesystem.dir;
     if (nsQuota != -1 || dsQuota != -1) {
       fsDir.rootDir.setQuota(nsQuota, dsQuota);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Thu Nov 21 20:06:09 2013
@@ -219,6 +219,12 @@ public class FSImageSerialization {
     out.writeLong(file.getPreferredBlockSize());
   }
 
+  private static void writeQuota(Quota.Counts quota, DataOutput out)
+      throws IOException {
+    out.writeLong(quota.get(Quota.NAMESPACE));
+    out.writeLong(quota.get(Quota.DISKSPACE));
+  }
+
   /**
    * Serialize a {@link INodeDirectory}
    * @param node The node to write
@@ -234,8 +240,8 @@ public class FSImageSerialization {
     out.writeLong(0);   // preferred block size
     out.writeInt(-1);   // # of blocks
 
-    out.writeLong(node.getNsQuota());
-    out.writeLong(node.getDsQuota());
+    writeQuota(node.getQuotaCounts(), out);
+
     if (node instanceof INodeDirectorySnapshottable) {
       out.writeBoolean(true);
     } else {
@@ -256,9 +262,7 @@ public class FSImageSerialization {
     writeLocalName(a, out);
     writePermissionStatus(a, out);
     out.writeLong(a.getModificationTime());
-
-    out.writeLong(a.getNsQuota());
-    out.writeLong(a.getDsQuota());
+    writeQuota(a.getQuotaCounts(), out);
   }
 
   /**
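
The new writeQuota helper fixes the on-disk order as namespace count, then diskspace count. A hypothetical read-side counterpart (not part of this commit) would mirror it:

    // Hypothetical counterpart to writeQuota; not part of this commit.
    private static Quota.Counts readQuota(DataInput in) throws IOException {
      final long nsQuota = in.readLong();   // must match the write order
      final long dsQuota = in.readLong();
      return Quota.Counts.newInstance(nsQuota, dsQuota);
    }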

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Nov 21 20:06:09 2013
@@ -152,7 +152,8 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -7056,8 +7057,8 @@ public class FSNamesystem implements Nam
     }
   }
 
-  long addPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  long addCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
@@ -7073,15 +7074,15 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException(
-            "Cannot add PathBasedCache directive", safeMode);
+            "Cannot add cache directive", safeMode);
       }
       if (directive.getId() != null) {
         throw new IOException("addDirective: you cannot specify an ID " +
             "for this operation.");
       }
-      PathBasedCacheDirective effectiveDirective = 
+      CacheDirectiveInfo effectiveDirective = 
           cacheManager.addDirective(directive, pc);
-      getEditLog().logAddPathBasedCacheDirective(effectiveDirective,
+      getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
           cacheEntry != null);
       result = effectiveDirective.getId();
       success = true;
@@ -7091,15 +7092,15 @@ public class FSNamesystem implements Nam
         getEditLog().logSync();
       }
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addPathBasedCacheDirective", null, null, null);
+        logAuditEvent(success, "addCacheDirective", null, null, null);
       }
       RetryCache.setState(cacheEntry, success, result);
     }
     return result;
   }
 
-  void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
+  void modifyCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
@@ -7113,10 +7114,10 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException(
-            "Cannot add PathBasedCache directive", safeMode);
+            "Cannot add cache directive", safeMode);
       }
       cacheManager.modifyDirective(directive, pc);
-      getEditLog().logModifyPathBasedCacheDirective(directive,
+      getEditLog().logModifyCacheDirectiveInfo(directive,
           cacheEntry != null);
       success = true;
     } finally {
@@ -7125,13 +7126,13 @@ public class FSNamesystem implements Nam
         getEditLog().logSync();
       }
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addPathBasedCacheDirective", null, null, null);
+        logAuditEvent(success, "addCacheDirective", null, null, null);
       }
       RetryCache.setState(cacheEntry, success);
     }
   }
 
-  void removePathBasedCacheDirective(Long id) throws IOException {
+  void removeCacheDirective(Long id) throws IOException {
     checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
@@ -7145,15 +7146,15 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
         throw new SafeModeException(
-            "Cannot remove PathBasedCache directives", safeMode);
+            "Cannot remove cache directives", safeMode);
       }
       cacheManager.removeDirective(id, pc);
-      getEditLog().logRemovePathBasedCacheDirective(id, cacheEntry != null);
+      getEditLog().logRemoveCacheDirectiveInfo(id, cacheEntry != null);
       success = true;
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "removePathBasedCacheDirective", null, null,
+        logAuditEvent(success, "removeCacheDirective", null, null,
             null);
       }
       RetryCache.setState(cacheEntry, success);
@@ -7161,23 +7162,23 @@ public class FSNamesystem implements Nam
     getEditLog().logSync();
   }
 
-  BatchedListEntries<PathBasedCacheDirective> listPathBasedCacheDirectives(
-      long startId, PathBasedCacheDirective filter) throws IOException {
+  BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(
+      long startId, CacheDirectiveInfo filter) throws IOException {
     checkOperation(OperationCategory.READ);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
-    BatchedListEntries<PathBasedCacheDirective> results;
+    BatchedListEntries<CacheDirectiveEntry> results;
     readLock();
     boolean success = false;
     try {
       checkOperation(OperationCategory.READ);
       results =
-          cacheManager.listPathBasedCacheDirectives(startId, filter, pc);
+          cacheManager.listCacheDirectives(startId, filter, pc);
       success = true;
     } finally {
       readUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "listPathBasedCacheDirectives", null, null,
+        logAuditEvent(success, "listCacheDirectives", null, null,
             null);
       }
     }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Thu Nov 21 20:06:09 2013
@@ -383,10 +383,11 @@ public abstract class INode implements I
   public final ContentSummary computeAndConvertContentSummary(
       ContentSummaryComputationContext summary) {
     Content.Counts counts = computeContentSummary(summary).getCounts();
+    final Quota.Counts q = getQuotaCounts();
     return new ContentSummary(counts.get(Content.LENGTH),
         counts.get(Content.FILE) + counts.get(Content.SYMLINK),
-        counts.get(Content.DIRECTORY), getNsQuota(),
-        counts.get(Content.DISKSPACE), getDsQuota());
+        counts.get(Content.DIRECTORY), q.get(Quota.NAMESPACE),
+        counts.get(Content.DISKSPACE), q.get(Quota.DISKSPACE));
   }
 
   /**
@@ -412,18 +413,15 @@ public abstract class INode implements I
 
   /**
    * Get the quota set for this inode
-   * @return the quota if it is set; -1 otherwise
+   * @return the quota counts.  The count is -1 if it is not set.
    */
-  public long getNsQuota() {
-    return -1;
-  }
-
-  public long getDsQuota() {
-    return -1;
+  public Quota.Counts getQuotaCounts() {
+    return Quota.Counts.newInstance(-1, -1);
   }
   
   public final boolean isQuotaSet() {
-    return getNsQuota() >= 0 || getDsQuota() >= 0;
+    final Quota.Counts q = getQuotaCounts();
+    return q.get(Quota.NAMESPACE) >= 0 || q.get(Quota.DISKSPACE) >= 0;
   }
   
   /**
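
A short usage sketch of the new aggregate accessor; printQuota is a hypothetical caller, while getQuotaCounts, isQuotaSet, Quota.NAMESPACE, and Quota.DISKSPACE are as in the hunks above.

    // Hypothetical caller of the API introduced above.
    static void printQuota(INode inode) {
      final Quota.Counts q = inode.getQuotaCounts();
      final long nsQuota = q.get(Quota.NAMESPACE);   // -1 means "not set"
      final long dsQuota = q.get(Quota.DISKSPACE);   // -1 means "not set"
      System.out.println("ns=" + nsQuota + ", ds=" + dsQuota
          + ", quotaSet=" + inode.isQuotaSet());
    }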

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Thu Nov 21 20:06:09 2013
@@ -612,8 +612,7 @@ public class INodeDirectory extends INod
   @Override
   public boolean metadataEquals(INodeDirectoryAttributes other) {
     return other != null
-        && getNsQuota() == other.getNsQuota()
-        && getDsQuota() == other.getDsQuota()
+        && getQuotaCounts().equals(other.getQuotaCounts())
         && getPermissionLong() == other.getPermissionLong();
   }
   

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java Thu Nov 21 20:06:09 2013
@@ -27,9 +27,7 @@ import com.google.common.base.Preconditi
  */
 @InterfaceAudience.Private
 public interface INodeDirectoryAttributes extends INodeAttributes {
-  public long getNsQuota();
-
-  public long getDsQuota();
+  public Quota.Counts getQuotaCounts();
 
   public boolean metadataEquals(INodeDirectoryAttributes other);
   
@@ -46,20 +44,14 @@ public interface INodeDirectoryAttribute
     }
 
     @Override
-    public long getNsQuota() {
-      return -1;
-    }
-
-    @Override
-    public long getDsQuota() {
-      return -1;
+    public Quota.Counts getQuotaCounts() {
+      return Quota.Counts.newInstance(-1, -1);
     }
 
     @Override
     public boolean metadataEquals(INodeDirectoryAttributes other) {
       return other != null
-          && getNsQuota() == other.getNsQuota()
-          && getDsQuota() == other.getDsQuota()
+          && this.getQuotaCounts().equals(other.getQuotaCounts())
           && getPermissionLong() == other.getPermissionLong();
     }
   }
@@ -68,6 +60,7 @@ public interface INodeDirectoryAttribute
     private final long nsQuota;
     private final long dsQuota;
 
+
     public CopyWithQuota(byte[] name, PermissionStatus permissions,
         long modificationTime, long nsQuota, long dsQuota) {
       super(name, permissions, modificationTime);
@@ -78,18 +71,14 @@ public interface INodeDirectoryAttribute
     public CopyWithQuota(INodeDirectory dir) {
       super(dir);
       Preconditions.checkArgument(dir.isQuotaSet());
-      this.nsQuota = dir.getNsQuota();
-      this.dsQuota = dir.getDsQuota();
+      final Quota.Counts q = dir.getQuotaCounts();
+      this.nsQuota = q.get(Quota.NAMESPACE);
+      this.dsQuota = q.get(Quota.DISKSPACE);
     }
     
     @Override
-    public final long getNsQuota() {
-      return nsQuota;
-    }
-
-    @Override
-    public final long getDsQuota() {
-      return dsQuota;
+    public Quota.Counts getQuotaCounts() {
+      return Quota.Counts.newInstance(nsQuota, dsQuota);
     }
   }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java Thu Nov 21 20:06:09 2013
@@ -44,7 +44,7 @@ public class INodeDirectoryWithQuota ext
   * @param dsQuota Diskspace quota to be assigned to this inode
    * @param other The other inode from which all other properties are copied
    */
-  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
+  INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
       long nsQuota, long dsQuota) {
     super(other, adopt);
     final Quota.Counts counts = other.computeQuotaUsage();
@@ -54,6 +54,11 @@ public class INodeDirectoryWithQuota ext
     this.dsQuota = dsQuota;
   }
   
+  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
+      Quota.Counts quota) {
+    this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE));
+  }
+
   /** constructor with no quota verification */
   INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions,
       long modificationTime, long nsQuota, long dsQuota) {
@@ -67,20 +72,9 @@ public class INodeDirectoryWithQuota ext
     super(id, name, permissions, 0L);
   }
   
-  /** Get this directory's namespace quota
-   * @return this directory's namespace quota
-   */
-  @Override
-  public long getNsQuota() {
-    return nsQuota;
-  }
-  
-  /** Get this directory's diskspace quota
-   * @return this directory's diskspace quota
-   */
   @Override
-  public long getDsQuota() {
-    return dsQuota;
+  public Quota.Counts getQuotaCounts() {
+    return Quota.Counts.newInstance(nsQuota, dsQuota);
   }
   
   /** Set this directory's quota
@@ -120,7 +114,7 @@ public class INodeDirectoryWithQuota ext
   }
   
   private void checkDiskspace(final long computed) {
-    if (-1 != getDsQuota() && diskspace != computed) {
+    if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) {
       NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
           + getFullPathName() + ". Cached = " + diskspace
           + " != Computed = " + computed);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java Thu Nov 21 20:06:09 2013
@@ -295,16 +295,11 @@ public abstract class INodeReference ext
   }
 
   @Override
-  public final long getNsQuota() {
-    return referred.getNsQuota();
+  public Quota.Counts getQuotaCounts() {
+    return referred.getQuotaCounts();
   }
 
   @Override
-  public final long getDsQuota() {
-    return referred.getDsQuota();
-  }
-  
-  @Override
   public final void clear() {
     super.clear();
     referred = null;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Nov 21 20:06:09 2013
@@ -804,6 +804,10 @@ public class NameNode implements NameNod
     return httpServer.getHttpAddress();
   }
 
+  /**
+   * @return NameNode HTTPS address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
+   */
   public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();
   }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu Nov 21 20:06:09 2013
@@ -61,7 +61,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1236,52 +1237,52 @@ class NameNodeRpcServer implements Namen
   }
 
   @Override
-  public long addPathBasedCacheDirective(
-      PathBasedCacheDirective path) throws IOException {
-    return namesystem.addPathBasedCacheDirective(path);
+  public long addCacheDirective(
+      CacheDirectiveInfo path) throws IOException {
+    return namesystem.addCacheDirective(path);
   }
 
   @Override
-  public void modifyPathBasedCacheDirective(
-      PathBasedCacheDirective directive) throws IOException {
-    namesystem.modifyPathBasedCacheDirective(directive);
+  public void modifyCacheDirective(
+      CacheDirectiveInfo directive) throws IOException {
+    namesystem.modifyCacheDirective(directive);
   }
 
   @Override
-  public void removePathBasedCacheDirective(long id) throws IOException {
-    namesystem.removePathBasedCacheDirective(id);
+  public void removeCacheDirective(long id) throws IOException {
+    namesystem.removeCacheDirective(id);
   }
 
-  private class ServerSidePathBasedCacheEntriesIterator
-      extends BatchedRemoteIterator<Long, PathBasedCacheDirective> {
+  private class ServerSideCacheEntriesIterator 
+      extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
 
-    private final PathBasedCacheDirective filter;
+    private final CacheDirectiveInfo filter;
     
-    public ServerSidePathBasedCacheEntriesIterator(Long firstKey, 
-        PathBasedCacheDirective filter) {
+    public ServerSideCacheEntriesIterator(Long firstKey,
+        CacheDirectiveInfo filter) {
       super(firstKey);
       this.filter = filter;
     }
 
     @Override
-    public BatchedEntries<PathBasedCacheDirective> makeRequest(
+    public BatchedEntries<CacheDirectiveEntry> makeRequest(
         Long nextKey) throws IOException {
-      return namesystem.listPathBasedCacheDirectives(nextKey, filter);
+      return namesystem.listCacheDirectives(nextKey, filter);
     }
 
     @Override
-    public Long elementToPrevKey(PathBasedCacheDirective entry) {
-      return entry.getId();
+    public Long elementToPrevKey(CacheDirectiveEntry entry) {
+      return entry.getInfo().getId();
     }
   }
   
   @Override
-  public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(long prevId,
-      PathBasedCacheDirective filter) throws IOException {
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(long prevId,
+      CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
-      filter = new PathBasedCacheDirective.Builder().build();
+      filter = new CacheDirectiveInfo.Builder().build();
     }
-    return new ServerSidePathBasedCacheEntriesIterator(prevId, filter);
+    return new ServerSideCacheEntriesIterator(prevId, filter);
   }
 
   @Override
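
The renamed iterator is consumed through the usual RemoteIterator protocol. A hedged client-side sketch follows; the dfs handle is an assumed, already-connected DistributedFileSystem, and the calls match the renamed APIs in this commit.

    // Assumes dfs is a connected DistributedFileSystem instance.
    RemoteIterator<CacheDirectiveEntry> it =
        dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().build());
    while (it.hasNext()) {
      final CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo().getId()
          + " -> " + entry.getInfo().getPath());
    }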

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Thu Nov 21 20:06:09 2013
@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryMXBean;
 import java.lang.management.MemoryUsage;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URLEncoder;
@@ -57,7 +56,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1087,7 +1085,7 @@ class NamenodeJspHelper {
           doc.endTag();
 
           doc.startTag("ds_quota");
-          doc.pcdata(""+inode.getDsQuota());
+          doc.pcdata(""+inode.getQuotaCounts().get(Quota.DISKSPACE));
           doc.endTag();
 
           doc.startTag("permission_status");

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java Thu Nov 21 20:06:09 2013
@@ -41,7 +41,7 @@ public enum Quota {
     }
     
     Counts() {
-      super(Quota.values());
+      super(Quota.class);
     }
   }
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Thu Nov 21 20:06:09 2013
@@ -491,7 +491,7 @@ public class INodeDirectoryWithSnapshot 
 
   INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
       DirectoryDiffList diffs) {
-    super(that, adopt, that.getNsQuota(), that.getDsQuota());
+    super(that, adopt, that.getQuotaCounts());
     this.diffs = diffs != null? diffs: new DirectoryDiffList();
   }
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Thu Nov 21 20:06:09 2013
@@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.ParamFilter;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -98,6 +99,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
@@ -214,7 +216,8 @@ public class NamenodeWebHdfsMethods {
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    t.setKind(WebHdfsFileSystem.TOKEN_KIND);
+    Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND : SWebHdfsFileSystem.TOKEN_KIND;
+    t.setKind(kind);
     return t;
   }
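
Condensed, the scheme check added above selects the delegation token kind so HTTPS clients receive the SWebHdfs kind; t is the Token instance from the hunk, and the TOKEN_KIND constants are the ones imported in this change.

    // Restatement of the added lines: pick the kind by request scheme.
    final Text kind = "http".equals(request.getScheme())
        ? WebHdfsFileSystem.TOKEN_KIND
        : SWebHdfsFileSystem.TOKEN_KIND;
    t.setKind(kind);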
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java Thu Nov 21 20:06:09 2013
@@ -30,8 +30,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.tools.TableListing.Justification;
 import org.apache.hadoop.ipc.RemoteException;
@@ -120,7 +122,7 @@ public class CacheAdmin extends Configur
     int run(Configuration conf, List<String> args) throws IOException;
   }
 
-  private static class AddPathBasedCacheDirectiveCommand implements Command {
+  private static class AddCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-addDirective";
@@ -143,7 +145,7 @@ public class CacheAdmin extends Configur
           "added. You must have write permission on the cache pool "
           + "in order to add new directives.");
       return getShortUsage() + "\n" +
-        "Add a new PathBasedCache directive.\n\n" +
+        "Add a new cache directive.\n\n" +
         listing.toString();
     }
 
@@ -171,14 +173,14 @@ public class CacheAdmin extends Configur
       }
         
       DistributedFileSystem dfs = getDFS(conf);
-      PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder().
+      CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().
           setPath(new Path(path)).
           setReplication(replication).
           setPool(poolName).
           build();
       try {
-        long id = dfs.addPathBasedCacheDirective(directive);
-        System.out.println("Added PathBasedCache entry " + id);
+        long id = dfs.addCacheDirective(directive);
+        System.out.println("Added cache directive " + id);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
         return 2;
@@ -188,7 +190,7 @@ public class CacheAdmin extends Configur
     }
   }
 
-  private static class RemovePathBasedCacheDirectiveCommand implements Command {
+  private static class RemoveCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-removeDirective";
@@ -205,7 +207,7 @@ public class CacheAdmin extends Configur
       listing.addRow("<id>", "The id of the cache directive to remove.  " + 
         "You must have write permission on the pool of the " +
         "directive in order to remove it.  To see a list " +
-        "of PathBasedCache directive IDs, use the -listDirectives command.");
+        "of cache directive IDs, use the -listDirectives command.");
       return getShortUsage() + "\n" +
         "Remove a cache directive.\n\n" +
         listing.toString();
@@ -238,8 +240,8 @@ public class CacheAdmin extends Configur
       }
       DistributedFileSystem dfs = getDFS(conf);
       try {
-        dfs.getClient().removePathBasedCacheDirective(id);
-        System.out.println("Removed PathBasedCache directive " + id);
+        dfs.getClient().removeCacheDirective(id);
+        System.out.println("Removed cache directive " + id);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
         return 2;
@@ -248,7 +250,7 @@ public class CacheAdmin extends Configur
     }
   }
 
-  private static class ModifyPathBasedCacheDirectiveCommand implements Command {
+  private static class ModifyCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-modifyDirective";
@@ -273,14 +275,14 @@ public class CacheAdmin extends Configur
           "added. You must have write permission on the cache pool "
           + "in order to move a directive into it. (optional)");
       return getShortUsage() + "\n" +
-        "Modify a PathBasedCache directive.\n\n" +
+        "Modify a cache directive.\n\n" +
         listing.toString();
     }
 
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
-      PathBasedCacheDirective.Builder builder =
-        new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+        new CacheDirectiveInfo.Builder();
       boolean modified = false;
       String idString = StringUtils.popOptionWithArgument("-id", args);
       if (idString == null) {
@@ -316,8 +318,8 @@ public class CacheAdmin extends Configur
       }
       DistributedFileSystem dfs = getDFS(conf);
       try {
-        dfs.modifyPathBasedCacheDirective(builder.build());
-        System.out.println("Modified PathBasedCache entry " + idString);
+        dfs.modifyCacheDirective(builder.build());
+        System.out.println("Modified cache directive " + idString);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
         return 2;
@@ -326,7 +328,7 @@ public class CacheAdmin extends Configur
     }
   }
 
-  private static class RemovePathBasedCacheDirectivesCommand implements Command {
+  private static class RemoveCacheDirectiveInfosCommand implements Command {
     @Override
     public String getName() {
       return "-removeDirectives";
@@ -362,31 +364,31 @@ public class CacheAdmin extends Configur
         return 1;
       }
       DistributedFileSystem dfs = getDFS(conf);
-      RemoteIterator<PathBasedCacheDirective> iter =
-          dfs.listPathBasedCacheDirectives(
-              new PathBasedCacheDirective.Builder().
+      RemoteIterator<CacheDirectiveEntry> iter =
+          dfs.listCacheDirectives(
+              new CacheDirectiveInfo.Builder().
                   setPath(new Path(path)).build());
       int exitCode = 0;
       while (iter.hasNext()) {
-        PathBasedCacheDirective directive = iter.next();
+        CacheDirectiveEntry entry = iter.next();
         try {
-          dfs.removePathBasedCacheDirective(directive.getId());
-          System.out.println("Removed PathBasedCache directive " +
-              directive.getId());
+          dfs.removeCacheDirective(entry.getInfo().getId());
+          System.out.println("Removed cache directive " +
+              entry.getInfo().getId());
         } catch (IOException e) {
           System.err.println(prettifyException(e));
           exitCode = 2;
         }
       }
       if (exitCode == 0) {
-        System.out.println("Removed every PathBasedCache directive with path " +
+        System.out.println("Removed every cache directive with path " +
             path);
       }
       return exitCode;
     }
   }
 
-  private static class ListPathBasedCacheDirectiveCommand implements Command {
+  private static class ListCacheDirectiveInfoCommand implements Command {
     @Override
     public String getName() {
       return "-listDirectives";
@@ -394,27 +396,28 @@ public class CacheAdmin extends Configur
 
     @Override
     public String getShortUsage() {
-      return "[" + getName() + " [-path <path>] [-pool <pool>]]\n";
+      return "[" + getName() + " [-stats] [-path <path>] [-pool <pool>]]\n";
     }
 
     @Override
     public String getLongUsage() {
       TableListing listing = getOptionDescriptionListing();
       listing.addRow("<path>", "List only " +
-          "PathBasedCache directives with this path. " +
-          "Note that if there is a PathBasedCache directive for <path> " +
+          "cache directives with this path. " +
+          "Note that if there is a cache directive for <path> " +
           "in a cache pool that we don't have read access for, it " + 
           "will not be listed.");
       listing.addRow("<pool>", "List only path cache directives in that pool.");
+      listing.addRow("-stats", "List cache directive statistics.");
       return getShortUsage() + "\n" +
-        "List PathBasedCache directives.\n\n" +
+        "List cache directives.\n\n" +
         listing.toString();
     }
 
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
-      PathBasedCacheDirective.Builder builder =
-          new PathBasedCacheDirective.Builder();
+      CacheDirectiveInfo.Builder builder =
+          new CacheDirectiveInfo.Builder();
       String pathFilter = StringUtils.popOptionWithArgument("-path", args);
       if (pathFilter != null) {
         builder.setPath(new Path(pathFilter));
@@ -423,28 +426,42 @@ public class CacheAdmin extends Configur
       if (poolFilter != null) {
         builder.setPool(poolFilter);
       }
+      boolean printStats = StringUtils.popOption("-stats", args);
       if (!args.isEmpty()) {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-      TableListing tableListing = new TableListing.Builder().
-          addField("ID", Justification.LEFT).
+      TableListing.Builder tableBuilder = new TableListing.Builder().
+          addField("ID", Justification.RIGHT).
           addField("POOL", Justification.LEFT).
-          addField("REPLICATION", Justification.LEFT).
-          addField("PATH", Justification.LEFT).
-          build();
+          addField("REPLICATION", Justification.RIGHT).
+          addField("PATH", Justification.LEFT);
+      if (printStats) {
+        tableBuilder.addField("NEEDED", Justification.RIGHT).
+                    addField("CACHED", Justification.RIGHT).
+                    addField("FILES", Justification.RIGHT);
+      }
+      TableListing tableListing = tableBuilder.build();
+
       DistributedFileSystem dfs = getDFS(conf);
-      RemoteIterator<PathBasedCacheDirective> iter =
-          dfs.listPathBasedCacheDirectives(builder.build());
+      RemoteIterator<CacheDirectiveEntry> iter =
+          dfs.listCacheDirectives(builder.build());
       int numEntries = 0;
       while (iter.hasNext()) {
-        PathBasedCacheDirective directive = iter.next();
-        String row[] = new String[] {
-            "" + directive.getId(), directive.getPool(),
-            "" + directive.getReplication(),
-            directive.getPath().toUri().getPath(),
-        };
-        tableListing.addRow(row);
+        CacheDirectiveEntry entry = iter.next();
+        CacheDirectiveInfo directive = entry.getInfo();
+        CacheDirectiveStats stats = entry.getStats();
+        List<String> row = new LinkedList<String>();
+        row.add("" + directive.getId());
+        row.add(directive.getPool());
+        row.add("" + directive.getReplication());
+        row.add(directive.getPath().toUri().getPath());
+        if (printStats) {
+          row.add("" + stats.getBytesNeeded());
+          row.add("" + stats.getBytesCached());
+          row.add("" + stats.getFilesAffected());
+        }
+        tableListing.addRow(row.toArray(new String[0]));
         numEntries++;
       }
       System.out.print(String.format("Found %d entr%s\n",
@@ -734,7 +751,7 @@ public class CacheAdmin extends Configur
           addField("OWNER", Justification.LEFT).
           addField("GROUP", Justification.LEFT).
           addField("MODE", Justification.LEFT).
-          addField("WEIGHT", Justification.LEFT).
+          addField("WEIGHT", Justification.RIGHT).
           build();
       int numResults = 0;
       try {
@@ -824,11 +841,11 @@ public class CacheAdmin extends Configur
   }
 
   private static Command[] COMMANDS = {
-    new AddPathBasedCacheDirectiveCommand(),
-    new ModifyPathBasedCacheDirectiveCommand(),
-    new ListPathBasedCacheDirectiveCommand(),
-    new RemovePathBasedCacheDirectiveCommand(),
-    new RemovePathBasedCacheDirectivesCommand(),
+    new AddCacheDirectiveInfoCommand(),
+    new ModifyCacheDirectiveInfoCommand(),
+    new ListCacheDirectiveInfoCommand(),
+    new RemoveCacheDirectiveInfoCommand(),
+    new RemoveCacheDirectiveInfosCommand(),
     new AddCachePoolCommand(),
     new ModifyCachePoolCommand(),
     new RemoveCachePoolCommand(),

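Taken together, the renamed client API reads as below; a minimal sketch using only the calls exercised by the command classes above (path and pool names are illustrative):

    DistributedFileSystem dfs = ...;  // obtained as in getDFS(conf) above
    long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
        setPath(new Path("/datasets/foo")).
        setReplication((short) 2).
        setPool("pool1").
        build());
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool1").build());
    while (it.hasNext()) {
      CacheDirectiveEntry e = it.next();
      System.out.println(e.getInfo().getId() + ": " +
          e.getStats().getBytesCached() + "/" +
          e.getStats().getBytesNeeded() + " bytes cached");
    }
    dfs.removeCacheDirective(id);
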
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java Thu Nov 21 20:06:09 2013
@@ -30,9 +30,9 @@ import org.apache.hadoop.classification.
  * Example:
  * 
  * NAME   OWNER   GROUP   MODE       WEIGHT
- * pool1  andrew  andrew  rwxr-xr-x  100
- * pool2  andrew  andrew  rwxr-xr-x  100
- * pool3  andrew  andrew  rwxr-xr-x  100
+ * pool1  andrew  andrew  rwxr-xr-x     100
+ * pool2  andrew  andrew  rwxr-xr-x     100
+ * pool3  andrew  andrew  rwxr-xr-x     100
  * 
  */
 @InterfaceAudience.Private

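The justification switch above is what makes numeric columns line up; a short sketch of the builder usage (field names illustrative):

    TableListing listing = new TableListing.Builder().
        addField("NAME", Justification.LEFT).
        addField("WEIGHT", Justification.RIGHT).  // numbers align right
        build();
    listing.addRow(new String[] {"pool1", "100"});
    listing.addRow(new String[] {"pool10", "5"});
    System.out.print(listing.toString());
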
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java Thu Nov 21 20:06:09 2013
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import java.util.Arrays;
 import java.util.HashMap;
 
 import com.google.common.base.Preconditions;
@@ -34,21 +35,19 @@ import com.google.common.base.Preconditi
  * @param <E> the enum type
  */
 public class EnumCounters<E extends Enum<E>> {
-  /** An array of enum constants. */
-  private final E[] enumConstants;
+  /** The class of the enum. */
+  private final Class<E> enumClass;
   /** The counter array, counters[i] corresponds to the enumConstants[i]. */
   private final long[] counters;
 
   /**
    * Construct counters for the given enum constants.
-   * @param enumConstants an array of enum constants such that, 
-   *                      for all i, enumConstants[i].ordinal() == i.
+   * @param enumClass the enum class of the counters.
    */
-  public EnumCounters(final E[] enumConstants) {
-    for(int i = 0; i < enumConstants.length; i++) {
-      Preconditions.checkArgument(enumConstants[i].ordinal() == i);
-    }
-    this.enumConstants = enumConstants;
+  public EnumCounters(final Class<E> enumClass) {
+    final E[] enumConstants = enumClass.getEnumConstants();
+    Preconditions.checkNotNull(enumConstants);
+    this.enumClass = enumClass;
     this.counters = new long[enumConstants.length];
   }
   
@@ -69,6 +68,13 @@ public class EnumCounters<E extends Enum
     counters[e.ordinal()] = value;
   }
 
+  /** Set this counters to that counters. */
+  public final void set(final EnumCounters<E> that) {
+    for(int i = 0; i < counters.length; i++) {
+      this.counters[i] = that.counters[i];
+    }
+  }
+
   /** Add the given value to counter e. */
   public final void add(final E e, final long value) {
     counters[e.ordinal()] += value;
@@ -86,7 +92,7 @@ public class EnumCounters<E extends Enum
     counters[e.ordinal()] -= value;
   }
 
-  /** Subtract that counters from this counters. */
+  /** Subtract the values of that counters from this counters. */
   public final void subtract(final EnumCounters<E> that) {
     for(int i = 0; i < counters.length; i++) {
       this.counters[i] -= that.counters[i];
@@ -94,7 +100,25 @@ public class EnumCounters<E extends Enum
   }
 
   @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (!(obj instanceof EnumCounters)) {
+      return false;
+    }
+    final EnumCounters<?> that = (EnumCounters<?>)obj;
+    return this.enumClass == that.enumClass
+        && Arrays.equals(this.counters, that.counters);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(counters);
+  }
+
+  @Override
   public String toString() {
+    final E[] enumConstants = enumClass.getEnumConstants();
     final StringBuilder b = new StringBuilder();
     for(int i = 0; i < counters.length; i++) {
       final String name = enumConstants[i].name();

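A short sketch of the reworked constructor together with the new copy and equality methods (the Op enum is illustrative):

    enum Op { READ, WRITE }

    EnumCounters<Op> a = new EnumCounters<Op>(Op.class);
    a.add(Op.READ, 3);
    a.set(Op.WRITE, 7);
    EnumCounters<Op> b = new EnumCounters<Op>(Op.class);
    b.set(a);            // copy every value from a
    assert a.equals(b);  // value equality via the new equals()
    b.subtract(a);       // b's counters are now all zero
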
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java Thu Nov 21 20:06:09 2013
@@ -58,7 +58,8 @@ final class TokenAspect<T extends FileSy
     public boolean handleKind(Text kind) {
       return kind.equals(HftpFileSystem.TOKEN_KIND)
           || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND);
+          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+          || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
     @Override
@@ -83,6 +84,8 @@ final class TokenAspect<T extends FileSy
         uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address);
       } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
         uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
+      } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
+        uri = DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, address);
       } else {
         throw new IllegalArgumentException("Unsupported scheme");
       }

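The net effect of the two additions: a token whose kind is SWEBHDFS is mapped back to an swebhdfs:// URI when it is renewed or cancelled, mirroring the kind stamped by NamenodeWebHdfsMethods above. Schematically (the address variable is illustrative):

    final Text kind = token.getKind();
    final URI uri = kind.equals(SWebHdfsFileSystem.TOKEN_KIND)
        ? DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, address)
        : DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
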
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu Nov 21 20:06:09 2013
@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.hdfs.web.TokenAspect.DTSelecorByKind;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -98,7 +97,6 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 
@@ -118,8 +116,7 @@ public class WebHdfsFileSystem extends F
 
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
-  protected TokenAspect<WebHdfsFileSystem> tokenAspect = new TokenAspect<WebHdfsFileSystem>(
-      this, TOKEN_KIND);
+  protected TokenAspect<WebHdfsFileSystem> tokenAspect;
 
   private UserGroupInformation ugi;
   private URI uri;
@@ -140,17 +137,44 @@ public class WebHdfsFileSystem extends F
     return SCHEME;
   }
 
+  /**
+   * Return the underlying transport protocol (http / https).
+   */
+  protected String getTransportScheme() {
+    return "http";
+  }
+
+  /**
+   * Initialize tokenAspect. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  /**
+   * Initialize connectionFactory. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
+    initializeTokenAspect();
+    initializeConnectionFactory(conf);
+
     ugi = UserGroupInformation.getCurrentUser();
 
     try {
       this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
           null, null);
-      this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
+      this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
@@ -342,7 +366,7 @@ public class WebHdfsFileSystem extends F
    */
   private URL getNamenodeURL(String path, String query) throws IOException {
     InetSocketAddress nnAddr = getCurrentNNAddr();
-    final URL url = new URL("http", nnAddr.getHostName(),
+    final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
           nnAddr.getPort(), path + '?' + query);
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
@@ -840,7 +864,9 @@ public class WebHdfsFileSystem extends F
   @Override
   public void close() throws IOException {
     super.close();
-    tokenAspect.removeRenewAction();
+    synchronized (this) {
+      tokenAspect.removeRenewAction();
+    }
   }
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {

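These two hooks exist so a secure subclass can substitute its own token kind and an SSL-capable connection factory without re-implementing initialize(). A hedged sketch of an SWebHdfsFileSystem built on them (the SSL factory construction is elided, as its details are outside this hunk):

    public class SWebHdfsFileSystem extends WebHdfsFileSystem {
      public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
      public static final String SCHEME = "swebhdfs";

      @Override
      public String getScheme() {
        return SCHEME;
      }

      @Override
      protected String getTransportScheme() {
        return "https";
      }

      @Override
      protected synchronized void initializeTokenAspect() {
        tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
      }

      @Override
      protected void initializeConnectionFactory(Configuration conf)
          throws IOException {
        // Install an SSL-aware URLConnectionFactory here (details omitted).
      }
    }
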
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Thu Nov 21 20:06:09 2013
@@ -364,46 +364,53 @@ message IsFileClosedResponseProto {
   required bool result = 1;
 }
 
-message PathBasedCacheDirectiveInfoProto {
+message CacheDirectiveInfoProto {
   optional int64 id = 1;
   optional string path = 2;
   optional uint32 replication = 3;
   optional string pool = 4;
 }
 
-message AddPathBasedCacheDirectiveRequestProto {
-  required PathBasedCacheDirectiveInfoProto info = 1;
+message CacheDirectiveStatsProto {
+  required int64 bytesNeeded = 1;
+  required int64 bytesCached = 2;
+  required int64 filesAffected = 3;
 }
 
-message AddPathBasedCacheDirectiveResponseProto {
+message AddCacheDirectiveRequestProto {
+  required CacheDirectiveInfoProto info = 1;
+}
+
+message AddCacheDirectiveResponseProto {
   required int64 id = 1;
 }
 
-message ModifyPathBasedCacheDirectiveRequestProto {
-  required PathBasedCacheDirectiveInfoProto info = 1;
+message ModifyCacheDirectiveRequestProto {
+  required CacheDirectiveInfoProto info = 1;
 }
 
-message ModifyPathBasedCacheDirectiveResponseProto {
+message ModifyCacheDirectiveResponseProto {
 }
 
-message RemovePathBasedCacheDirectiveRequestProto {
+message RemoveCacheDirectiveRequestProto {
   required int64 id = 1;
 }
 
-message RemovePathBasedCacheDirectiveResponseProto {
+message RemoveCacheDirectiveResponseProto {
 }
 
-message ListPathBasedCacheDirectivesRequestProto {
+message ListCacheDirectivesRequestProto {
   required int64 prevId = 1;
-  required PathBasedCacheDirectiveInfoProto filter = 2;
+  required CacheDirectiveInfoProto filter = 2;
 }
 
-message ListPathBasedCacheDirectivesElementProto {
-  required PathBasedCacheDirectiveInfoProto info = 1;
+message CacheDirectiveEntryProto {
+  required CacheDirectiveInfoProto info = 1;
+  required CacheDirectiveStatsProto stats = 2;
 }
 
-message ListPathBasedCacheDirectivesResponseProto {
-  repeated ListPathBasedCacheDirectivesElementProto elements = 1;
+message ListCacheDirectivesResponseProto {
+  repeated CacheDirectiveEntryProto elements = 1;
   required bool hasMore = 2;
 }
 
@@ -631,14 +638,14 @@ service ClientNamenodeProtocol {
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
   rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
-  rpc addPathBasedCacheDirective(AddPathBasedCacheDirectiveRequestProto)
-      returns (AddPathBasedCacheDirectiveResponseProto);
-  rpc modifyPathBasedCacheDirective(ModifyPathBasedCacheDirectiveRequestProto)
-      returns (ModifyPathBasedCacheDirectiveResponseProto);
-  rpc removePathBasedCacheDirective(RemovePathBasedCacheDirectiveRequestProto)
-      returns (RemovePathBasedCacheDirectiveResponseProto);
-  rpc listPathBasedCacheDirectives(ListPathBasedCacheDirectivesRequestProto)
-      returns (ListPathBasedCacheDirectivesResponseProto);
+  rpc addCacheDirective(AddCacheDirectiveRequestProto)
+      returns (AddCacheDirectiveResponseProto);
+  rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)
+      returns (ModifyCacheDirectiveResponseProto);
+  rpc removeCacheDirective(RemoveCacheDirectiveRequestProto)
+      returns (RemoveCacheDirectiveResponseProto);
+  rpc listCacheDirectives(ListCacheDirectivesRequestProto)
+      returns (ListCacheDirectivesResponseProto);
   rpc addCachePool(AddCachePoolRequestProto)
       returns(AddCachePoolResponseProto);
   rpc modifyCachePool(ModifyCachePoolRequestProto)

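For completeness, the protobuf-generated Java builders for the renamed messages compose in the usual way; a sketch (path and pool illustrative; the generated classes are assumed to live in ClientNamenodeProtocolProtos):

    AddCacheDirectiveRequestProto req = AddCacheDirectiveRequestProto.newBuilder()
        .setInfo(CacheDirectiveInfoProto.newBuilder()
            .setPath("/datasets/foo")
            .setReplication(2)
            .setPool("pool1"))
        .build();
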
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem Thu Nov 21 20:06:09 2013
@@ -17,3 +17,4 @@ org.apache.hadoop.hdfs.DistributedFileSy
 org.apache.hadoop.hdfs.web.HftpFileSystem
 org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
+org.apache.hadoop.hdfs.web.SWebHdfsFileSystem

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Thu Nov 21 20:06:09 2013
@@ -1179,7 +1179,7 @@
 
 <property>
   <name>dfs.webhdfs.enabled</name>
-  <value>false</value>
+  <value>true</value>
   <description>
     Enable WebHDFS (REST API) in Namenodes and Datanodes.
   </description>

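With the default flipped to true, deployments that do not want the REST API must now opt out explicitly, either in hdfs-site.xml or in code; a one-line sketch:

    // "dfs.webhdfs.enabled"; opting out now requires an explicit false
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, false);
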
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm Thu Nov 21 20:06:09 2013
@@ -118,7 +118,7 @@ Centralized Cache Management in HDFS
 
   Usage: <<<hdfs cacheadmin -addDirective -path <path> -replication <replication> -pool <pool-name> >>>
 
-  Add a new PathBasedCache directive.
+  Add a new cache directive.
 
 *--+--+
 \<path\> | A path to cache. The path can be a directory or a file.
@@ -135,7 +135,7 @@ Centralized Cache Management in HDFS
   Remove a cache directive.
 
 *--+--+
-\<id\> | The id of the cache directive to remove.  You must have write permission on the pool of the directive in order to remove it.  To see a list of PathBasedCache directive IDs, use the -listDirectives command.
+\<id\> | The id of the cache directive to remove.  You must have write permission on the pool of the directive in order to remove it.  To see a list of cache directive IDs, use the -listDirectives command.
 *--+--+
 
 *** {removeDirectives}
@@ -152,10 +152,10 @@ Centralized Cache Management in HDFS
 
   Usage: <<<hdfs cacheadmin -listDirectives [-path <path>] [-pool <pool>] >>>
 
-  List PathBasedCache directives.
+  List cache directives.
 
 *--+--+
-\<path\> | List only PathBasedCache directives with this path. Note that if there is a PathBasedCache directive for <path> in a cache pool that we don't have read access for, it will not be listed.
+\<path\> | List only cache directives with this path. Note that if there is a cache directive for <path> in a cache pool that we don't have read access for, it will not be listed.
 *--+--+
 \<pool\> | List only path cache directives in that pool.
 *--+--+

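An illustrative end-to-end run of the documented commands (path, pool name, and the returned id are made up):

    hdfs cacheadmin -addDirective -path /datasets/foo -replication 2 -pool pool1
    hdfs cacheadmin -listDirectives -path /datasets/foo
    hdfs cacheadmin -removeDirective 1
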
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java Thu Nov 21 20:06:09 2013
@@ -89,7 +89,7 @@ abstract public class TestSymlinkHdfs ex
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).build();
-    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     dfs = cluster.getFileSystem();
   }
 

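The extra scheme argument lets the same test helper mint either flavor of the filesystem; e.g., a sketch assuming the helper also accepts the secure scheme:

    WebHdfsFileSystem swebhdfs =
        WebHdfsTestUtil.getWebHdfsFileSystem(conf, SWebHdfsFileSystem.SCHEME);
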
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Thu Nov 21 20:06:09 2013
@@ -1038,20 +1038,20 @@ public class DFSTestUtil {
     // OP_MODIFY_CACHE_POOL
     filesystem.modifyCachePool(new CachePoolInfo("pool1").setWeight(99));
     // OP_ADD_PATH_BASED_CACHE_DIRECTIVE
-    long id = filesystem.addPathBasedCacheDirective(
-        new PathBasedCacheDirective.Builder().
+    long id = filesystem.addCacheDirective(
+        new CacheDirectiveInfo.Builder().
             setPath(new Path("/path")).
             setReplication((short)1).
             setPool("pool1").
             build());
     // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
-    filesystem.modifyPathBasedCacheDirective(
-        new PathBasedCacheDirective.Builder().
+    filesystem.modifyCacheDirective(
+        new CacheDirectiveInfo.Builder().
             setId(id).
             setReplication((short)2).
             build());
     // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
-    filesystem.removePathBasedCacheDirective(id);
+    filesystem.removeCacheDirective(id);
     // OP_REMOVE_CACHE_POOL
     filesystem.removeCachePool("pool1");
   }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1544306&r1=1544305&r2=1544306&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Nov 21 20:06:09 2013
@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -886,8 +887,8 @@ public class TestDFSClientRetries {
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
-      final FileSystem fs = isWebHDFS?
-          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
+      final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsFileSystem.SCHEME) : dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
 
@@ -1091,7 +1092,7 @@ public class TestDFSClientRetries {
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         username, new String[]{"supergroup"});
 
-    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
         : DFSTestUtil.getFileSystemAs(ugi, conf);
   }