Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2015/07/28 20:14:04 UTC

[1/6] hadoop git commit: YARN-3846. RM Web UI queue filter is not working for sub queue. Contributed by Mohammad Shahid Khan

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2ebe8c7cb -> c78518749


YARN-3846. RM Web UI queue filter is not working for sub queue. Contributed by Mohammad Shahid Khan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3572ebd7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3572ebd7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3572ebd7

Branch: refs/heads/HDFS-7240
Commit: 3572ebd738aa5fa8b0906d75fb12cc6cbb991573
Parents: 3e6fce9
Author: Jian He <ji...@apache.org>
Authored: Mon Jul 27 16:57:11 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Jul 27 17:12:05 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                 | 3 +++
 .../server/resourcemanager/webapp/CapacitySchedulerPage.java    | 5 ++++-
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3572ebd7/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 534c55a..4f8484a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -695,6 +695,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3958. TestYarnConfigurationFields should be moved to hadoop-yarn-api
     module. (Varun Saxena via aajisaka)
 
+    YARN-3846. RM Web UI queue filter is not working for sub queue.
+    (Mohammad Shahid Khan via jianhe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3572ebd7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 12a3013..d8971b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -516,7 +516,10 @@ class CapacitySchedulerPage extends RmView {
           "  $('#cs').bind('select_node.jstree', function(e, data) {",
           "    var q = $('.q', data.rslt.obj).first().text();",
           "    if (q == 'Queue: root') q = '';",
-          "    else q = '^' + q.substr(q.lastIndexOf(':') + 2) + '$';",
+          "    else {",
+          "      q = q.substr(q.lastIndexOf(':') + 2);",
+          "      q = '^' + q.substr(q.lastIndexOf('.') + 1) + '$';",
+          "    }",
           "    $('#apps').dataTable().fnFilter(q, 4, true);",
           "  });",
           "  $('#cs').show();",

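For context on the fix: the jstree label for a sub-queue is its full path
(e.g. "Queue: root.a.a1"), while the queue column the filter targets holds
only the leaf queue name, so anchoring the full path as '^root.a.a1$' matched
nothing. A minimal plain-Java sketch of the corrected derivation, mirroring
the embedded JavaScript above (the sample label is illustrative):

    public class QueueFilterDemo {
      static String toFilter(String label) {
        if (label.equals("Queue: root")) {
          return "";                                          // root shows everything
        }
        String q = label.substring(label.lastIndexOf(':') + 2); // "root.a.a1"
        q = q.substring(q.lastIndexOf('.') + 1);                // leaf name "a1"
        return "^" + q + "$";                                 // anchored filter regex
      }

      public static void main(String[] args) {
        System.out.println(toFilter("Queue: root.a.a1")); // prints ^a1$
        System.out.println(toFilter("Queue: root"));      // prints empty string
      }
    }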

[2/6] hadoop git commit: HADOOP-12245. References to misspelled REMAINING_QUATA in FileSystemShell.md. Contributed by Gabor Liptak.

Posted by ae...@apache.org.
HADOOP-12245. References to misspelled REMAINING_QUATA in FileSystemShell.md. Contributed by Gabor Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e21dde50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e21dde50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e21dde50

Branch: refs/heads/HDFS-7240
Commit: e21dde501aa9323b7f34b4bc4ba9d282ec4f2707
Parents: 3572ebd
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 28 11:33:10 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 28 11:33:10 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md            | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e21dde50/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index baf39e3..aeaa5b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1017,6 +1017,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12239. StorageException complaining " no lease ID" when updating
     FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
 
+    HADOOP-12245. References to misspelled REMAINING_QUATA in
+    FileSystemShell.md. (Gabor Liptak via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e21dde50/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 144cb73..fb89ca1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -174,7 +174,7 @@ Usage: `hadoop fs -count [-q] [-h] [-v] <paths> `
 
 Count the number of directories, files and bytes under the paths that match the specified file pattern. The output columns with -count are: DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
 
-The output columns with -count -q are: QUOTA, REMAINING\_QUATA, SPACE\_QUOTA, REMAINING\_SPACE\_QUOTA, DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
+The output columns with -count -q are: QUOTA, REMAINING\_QUOTA, SPACE\_QUOTA, REMAINING\_SPACE\_QUOTA, DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
 
 The -h option shows sizes in human readable format.
 
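As a usage sketch, the corrected header order corresponds to output like the
following (the path and counts are illustrative; "none"/"inf" are what HDFS
prints when no quota is set):

    $ hadoop fs -count -q /user/alice
        none    inf    none    inf    3    12    102400    /user/alice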


[3/6] hadoop git commit: HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)

Posted by ae...@apache.org.
HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/030fcfa9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/030fcfa9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/030fcfa9

Branch: refs/heads/HDFS-7240
Commit: 030fcfa99c345ad57625486eeabedebf2fd4411f
Parents: e21dde5
Author: Arun Suresh <as...@apache.org>
Authored: Mon Jul 27 23:02:03 2015 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Mon Jul 27 23:02:03 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/io/retry/MultiException.java  |  49 +++
 .../hadoop/io/retry/RetryInvocationHandler.java |  99 +++++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../ha/ConfiguredFailoverProxyProvider.java     |  52 ++-
 .../ha/RequestHedgingProxyProvider.java         | 186 ++++++++++
 .../markdown/HDFSHighAvailabilityWithNFS.md     |   9 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md     |  10 +-
 .../ha/TestRequestHedgingProxyProvider.java     | 350 +++++++++++++++++++
 8 files changed, 724 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
new file mode 100644
index 0000000..4963a2d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.io.retry;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Holder class that clients can use to return multiple exceptions.
+ */
+public class MultiException extends IOException {
+
+  private final Map<String, Exception> exes;
+
+  public MultiException(Map<String, Exception> exes) {
+    this.exes = exes;
+  }
+
+  public Map<String, Exception> getExceptions() {
+    return exes;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("{");
+    for (Exception e : exes.values()) {
+      sb.append(e.toString()).append(", ");
+    }
+    sb.append("}");
+    return "MultiException[" + sb.toString() + "]";
+  }
+}

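A short hedged sketch of how a caller might aggregate per-endpoint failures
into a MultiException, as the hedging proxy provider later in this patch does
(the endpoint names and messages are illustrative):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.io.retry.MultiException;

    // Illustrative only: collect one exception per failed endpoint and
    // surface them as a single MultiException.
    public class MultiExceptionDemo {
      public static void main(String[] args) {
        Map<String, Exception> badResults = new HashMap<>();
        badResults.put("nn1:8020", new IOException("connection refused"));
        badResults.put("nn2:8020", new IOException("standby"));
        MultiException me = new MultiException(badResults);
        System.out.println(me);                        // MultiException[{...}]
        System.out.println(me.getExceptions().size()); // 2
      }
    }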
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 543567e..9256356 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -23,6 +23,8 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -101,7 +103,7 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
         Object ret = invokeMethod(method, args);
         hasMadeASuccessfulCall = true;
         return ret;
-      } catch (Exception e) {
+      } catch (Exception ex) {
         boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
             .getMethod(method.getName(), method.getParameterTypes())
             .isAnnotationPresent(Idempotent.class);
@@ -110,15 +112,16 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
               .getMethod(method.getName(), method.getParameterTypes())
               .isAnnotationPresent(AtMostOnce.class);
         }
-        RetryAction action = policy.shouldRetry(e, retries++,
-            invocationFailoverCount, isIdempotentOrAtMostOnce);
-        if (action.action == RetryAction.RetryDecision.FAIL) {
-          if (action.reason != null) {
+        List<RetryAction> actions = extractActions(policy, ex, retries++,
+                invocationFailoverCount, isIdempotentOrAtMostOnce);
+        RetryAction failAction = getFailAction(actions);
+        if (failAction != null) {
+          if (failAction.reason != null) {
             LOG.warn("Exception while invoking " + currentProxy.proxy.getClass()
                 + "." + method.getName() + " over " + currentProxy.proxyInfo
-                + ". Not retrying because " + action.reason, e);
+                + ". Not retrying because " + failAction.reason, ex);
           }
-          throw e;
+          throw ex;
         } else { // retry or failover
           // avoid logging the failover if this is the first call on this
           // proxy object, and we successfully achieve the failover without
@@ -126,8 +129,9 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
           boolean worthLogging = 
             !(invocationFailoverCount == 0 && !hasMadeASuccessfulCall);
           worthLogging |= LOG.isDebugEnabled();
-          if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY &&
-              worthLogging) {
+          RetryAction failOverAction = getFailOverAction(actions);
+          long delay = getDelayMillis(actions);
+          if (failOverAction != null && worthLogging) {
             String msg = "Exception while invoking " + method.getName()
                 + " of class " + currentProxy.proxy.getClass().getSimpleName()
                 + " over " + currentProxy.proxyInfo;
@@ -135,22 +139,22 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
             if (invocationFailoverCount > 0) {
               msg += " after " + invocationFailoverCount + " fail over attempts"; 
             }
-            msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis);
-            LOG.info(msg, e);
+            msg += ". Trying to fail over " + formatSleepMessage(delay);
+            LOG.info(msg, ex);
           } else {
             if(LOG.isDebugEnabled()) {
               LOG.debug("Exception while invoking " + method.getName()
                   + " of class " + currentProxy.proxy.getClass().getSimpleName()
                   + " over " + currentProxy.proxyInfo + ". Retrying "
-                  + formatSleepMessage(action.delayMillis), e);
+                  + formatSleepMessage(delay), ex);
             }
           }
-          
-          if (action.delayMillis > 0) {
-            Thread.sleep(action.delayMillis);
+
+          if (delay > 0) {
+            Thread.sleep(delay);
           }
           
-          if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
+          if (failOverAction != null) {
             // Make sure that concurrent failed method invocations only cause a
             // single actual fail over.
             synchronized (proxyProvider) {
@@ -169,7 +173,68 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
       }
     }
   }
-  
+
+  /**
+   * Obtain the largest retry delay from a list of RetryActions.
+   */
+  private long getDelayMillis(List<RetryAction> actions) {
+    long retVal = 0;
+    for (RetryAction action : actions) {
+      if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY ||
+              action.action == RetryAction.RetryDecision.RETRY) {
+        if (action.delayMillis > retVal) {
+          retVal = action.delayMillis;
+        }
+      }
+    }
+    return retVal;
+  }
+
+  /**
+   * Return the first FAILOVER_AND_RETRY action.
+   */
+  private RetryAction getFailOverAction(List<RetryAction> actions) {
+    for (RetryAction action : actions) {
+      if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
+        return action;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Return the last FAIL action, but only if there are no RETRY actions.
+   */
+  private RetryAction getFailAction(List<RetryAction> actions) {
+    RetryAction fAction = null;
+    for (RetryAction action : actions) {
+      if (action.action == RetryAction.RetryDecision.FAIL) {
+        fAction = action;
+      } else {
+        // At least 1 RETRY
+        return null;
+      }
+    }
+    return fAction;
+  }
+
+  private List<RetryAction> extractActions(RetryPolicy policy, Exception ex,
+                                           int i, int invocationFailoverCount,
+                                           boolean isIdempotentOrAtMostOnce)
+          throws Exception {
+    List<RetryAction> actions = new LinkedList<>();
+    if (ex instanceof MultiException) {
+      for (Exception th : ((MultiException) ex).getExceptions().values()) {
+        actions.add(policy.shouldRetry(th, i, invocationFailoverCount,
+                isIdempotentOrAtMostOnce));
+      }
+    } else {
+      actions.add(policy.shouldRetry(ex, i,
+              invocationFailoverCount, isIdempotentOrAtMostOnce));
+    }
+    return actions;
+  }
+
   private static String formatSleepMessage(long millis) {
     if (millis > 0) {
       return "after sleeping for " + millis + "ms.";

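The aggregation rules above are easy to miss in the diff: extractActions()
produces one RetryAction per wrapped exception of a MultiException, and a
single retryable verdict outweighs any number of FAIL verdicts
(getFailAction() returns null as soon as it sees a non-FAIL action). A
self-contained sketch of the same precedence, using a stand-in enum instead
of Hadoop's RetryAction (illustrative only):

    import java.util.Arrays;
    import java.util.List;

    public class RetryDecisionDemo {
      enum Decision { FAIL, RETRY, FAILOVER_AND_RETRY }

      // Mirrors getFailOverAction()/getFailAction(): prefer failing over,
      // then plain retry; give up only when every per-proxy verdict is FAIL.
      static Decision aggregate(List<Decision> verdicts) {
        for (Decision d : verdicts) {
          if (d == Decision.FAILOVER_AND_RETRY) {
            return Decision.FAILOVER_AND_RETRY;
          }
        }
        for (Decision d : verdicts) {
          if (d == Decision.RETRY) {
            return Decision.RETRY;
          }
        }
        return Decision.FAIL;
      }

      public static void main(String[] args) {
        // One proxy still worth retrying => keep going despite the failure.
        System.out.println(aggregate(Arrays.asList(
            Decision.FAIL, Decision.FAILOVER_AND_RETRY))); // FAILOVER_AND_RETRY
        System.out.println(aggregate(Arrays.asList(
            Decision.FAIL, Decision.FAIL)));               // FAIL
      }
    }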
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cc2a833..9b2de81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -753,6 +753,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8735. Inotify: All events classes should implement toString() API.
     (Surendra Singh Lilhore via aajisaka)
 
+    HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 235c886..ccce736 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
@@ -51,16 +53,40 @@ public class ConfiguredFailoverProxyProvider<T> extends
   private static final Log LOG =
       LogFactory.getLog(ConfiguredFailoverProxyProvider.class);
   
-  private final Configuration conf;
-  private final List<AddressRpcProxyPair<T>> proxies =
+  interface ProxyFactory<T> {
+    T createProxy(Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
+        UserGroupInformation ugi, boolean withRetries,
+        AtomicBoolean fallbackToSimpleAuth) throws IOException;
+  }
+
+  static class DefaultProxyFactory<T> implements ProxyFactory<T> {
+    @Override
+    public T createProxy(Configuration conf, InetSocketAddress nnAddr,
+        Class<T> xface, UserGroupInformation ugi, boolean withRetries,
+        AtomicBoolean fallbackToSimpleAuth) throws IOException {
+      return NameNodeProxies.createNonHAProxy(conf,
+          nnAddr, xface, ugi, false, fallbackToSimpleAuth).getProxy();
+    }
+  }
+
+  protected final Configuration conf;
+  protected final List<AddressRpcProxyPair<T>> proxies =
       new ArrayList<AddressRpcProxyPair<T>>();
   private final UserGroupInformation ugi;
-  private final Class<T> xface;
-  
+  protected final Class<T> xface;
+
   private int currentProxyIndex = 0;
+  private final ProxyFactory<T> factory;
 
   public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
       Class<T> xface) {
+    this(conf, uri, xface, new DefaultProxyFactory<T>());
+  }
+
+  @VisibleForTesting
+  ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
+      Class<T> xface, ProxyFactory<T> factory) {
+
     Preconditions.checkArgument(
         xface.isAssignableFrom(NamenodeProtocols.class),
         "Interface class %s is not a valid NameNode protocol!");
@@ -78,9 +104,10 @@ public class ConfiguredFailoverProxyProvider<T> extends
         HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
         HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
     this.conf.setInt(
-        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
-        maxRetriesOnSocketTimeouts);
-    
+            CommonConfigurationKeysPublic
+                    .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
+            maxRetriesOnSocketTimeouts);
+
     try {
       ugi = UserGroupInformation.getCurrentUser();
       
@@ -102,6 +129,7 @@ public class ConfiguredFailoverProxyProvider<T> extends
       // URI of the cluster. Clone this token to apply to each of the
       // underlying IPC addresses so that the IPC code can find it.
       HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
+      this.factory = factory;
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
@@ -120,8 +148,8 @@ public class ConfiguredFailoverProxyProvider<T> extends
     AddressRpcProxyPair<T> current = proxies.get(currentProxyIndex);
     if (current.namenode == null) {
       try {
-        current.namenode = NameNodeProxies.createNonHAProxy(conf,
-            current.address, xface, ugi, false, fallbackToSimpleAuth).getProxy();
+        current.namenode = factory.createProxy(conf,
+            current.address, xface, ugi, false, fallbackToSimpleAuth);
       } catch (IOException e) {
         LOG.error("Failed to create RPC proxy to NameNode", e);
         throw new RuntimeException(e);
@@ -131,7 +159,11 @@ public class ConfiguredFailoverProxyProvider<T> extends
   }
 
   @Override
-  public synchronized void performFailover(T currentProxy) {
+  public void performFailover(T currentProxy) {
+    incrementProxyIndex();
+  }
+
+  synchronized void incrementProxyIndex() {
     currentProxyIndex = (currentProxyIndex + 1) % proxies.size();
   }
 

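The new ProxyFactory seam exists so tests can substitute proxy creation
without touching the network; TestRequestHedgingProxyProvider at the end of
this patch does exactly that. A hedged sketch of such a stub factory (it must
live in the same package because ProxyFactory is package-private; the stub
object is whatever mock the test supplies):

    package org.apache.hadoop.hdfs.server.namenode.ha;

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.util.concurrent.atomic.AtomicBoolean;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.ProxyFactory;
    import org.apache.hadoop.security.UserGroupInformation;

    // Illustrative: always hand back a pre-built object instead of dialing
    // a real NameNode, so failover logic can be exercised deterministically.
    class StubProxyFactory<T> implements ProxyFactory<T> {
      private final T stub;

      StubProxyFactory(T stub) {
        this.stub = stub;
      }

      @Override
      public T createProxy(Configuration conf, InetSocketAddress nnAddr,
          Class<T> xface, UserGroupInformation ugi, boolean withRetries,
          AtomicBoolean fallbackToSimpleAuth) throws IOException {
        return stub; // never touches the network
      }
    }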
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
new file mode 100644
index 0000000..b7216b0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.io.retry.MultiException;
+
+/**
+ * A FailoverProxyProvider implementation that technically does not "failover"
+ * per se. It constructs a wrapper proxy that sends the request to ALL
+ * underlying proxies simultaneously. It assumes that in an HA setup, there
+ * will be only one Active, and the active should respond faster than any
+ * configured standbys. Once it receives a response from any one of the
+ * configured proxies, outstanding requests to the other proxies are
+ * immediately cancelled.
+ */
+public class RequestHedgingProxyProvider<T> extends
+        ConfiguredFailoverProxyProvider<T> {
+
+  private static final Log LOG =
+          LogFactory.getLog(RequestHedgingProxyProvider.class);
+
+  class RequestHedgingInvocationHandler implements InvocationHandler {
+
+    final Map<String, ProxyInfo<T>> targetProxies;
+
+    public RequestHedgingInvocationHandler(
+            Map<String, ProxyInfo<T>> targetProxies) {
+      this.targetProxies = new HashMap<>(targetProxies);
+    }
+
+    /**
+     * Creates an Executor and invokes all proxies concurrently. This
+     * implementation assumes that clients have configured proper socket
+     * timeouts; otherwise the call can block forever.
+     *
+     * @param proxy
+     * @param method
+     * @param args
+     * @return
+     * @throws Throwable
+     */
+    @Override
+    public Object invoke(Object proxy, final Method method, final Object[] args)
+            throws Throwable {
+      Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
+      int numAttempts = 0;
+
+      ExecutorService executor = null;
+      CompletionService<Object> completionService;
+      try {
+        // Optimization: if only 2 proxies are configured and one has already
+        // failed over, then we don't need to create a thread pool at all.
+        targetProxies.remove(toIgnore);
+        if (targetProxies.size() == 1) {
+          ProxyInfo<T> proxyInfo = targetProxies.values().iterator().next();
+          Object retVal = method.invoke(proxyInfo.proxy, args);
+          successfulProxy = proxyInfo;
+          return retVal;
+        }
+        executor = Executors.newFixedThreadPool(proxies.size());
+        completionService = new ExecutorCompletionService<>(executor);
+        for (final Map.Entry<String, ProxyInfo<T>> pEntry :
+                targetProxies.entrySet()) {
+          Callable<Object> c = new Callable<Object>() {
+            @Override
+            public Object call() throws Exception {
+              return method.invoke(pEntry.getValue().proxy, args);
+            }
+          };
+          proxyMap.put(completionService.submit(c), pEntry.getValue());
+          numAttempts++;
+        }
+
+        Map<String, Exception> badResults = new HashMap<>();
+        while (numAttempts > 0) {
+          Future<Object> callResultFuture = completionService.take();
+          Object retVal;
+          try {
+            retVal = callResultFuture.get();
+            successfulProxy = proxyMap.get(callResultFuture);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Invocation successful on ["
+                      + successfulProxy.proxyInfo + "]");
+            }
+            return retVal;
+          } catch (Exception ex) {
+            ProxyInfo<T> tProxyInfo = proxyMap.get(callResultFuture);
+            LOG.warn("Invocation returned exception on "
+                    + "[" + tProxyInfo.proxyInfo + "]");
+            badResults.put(tProxyInfo.proxyInfo, ex);
+            numAttempts--;
+          }
+        }
+
+        // At this point we should have all bad results (exceptions);
+        // otherwise we would already have returned a successful result.
+        if (badResults.size() == 1) {
+          throw badResults.values().iterator().next();
+        } else {
+          throw new MultiException(badResults);
+        }
+      } finally {
+        if (executor != null) {
+          executor.shutdownNow();
+        }
+      }
+    }
+  }
+
+
+  private volatile ProxyInfo<T> successfulProxy = null;
+  private volatile String toIgnore = null;
+
+  public RequestHedgingProxyProvider(
+          Configuration conf, URI uri, Class<T> xface) {
+    this(conf, uri, xface, new DefaultProxyFactory<T>());
+  }
+
+  @VisibleForTesting
+  RequestHedgingProxyProvider(Configuration conf, URI uri,
+                              Class<T> xface, ProxyFactory<T> factory) {
+    super(conf, uri, xface, factory);
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public synchronized ProxyInfo<T> getProxy() {
+    if (successfulProxy != null) {
+      return successfulProxy;
+    }
+    Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
+    StringBuilder combinedInfo = new StringBuilder("[");
+    for (int i = 0; i < proxies.size(); i++) {
+      ProxyInfo<T> pInfo = super.getProxy();
+      incrementProxyIndex();
+      targetProxyInfos.put(pInfo.proxyInfo, pInfo);
+      combinedInfo.append(pInfo.proxyInfo).append(',');
+    }
+    combinedInfo.append(']');
+    T wrappedProxy = (T) Proxy.newProxyInstance(
+            RequestHedgingInvocationHandler.class.getClassLoader(),
+            new Class<?>[]{xface},
+            new RequestHedgingInvocationHandler(targetProxyInfos));
+    return new ProxyInfo<T>(wrappedProxy, combinedInfo.toString());
+  }
+
+  @Override
+  public synchronized void performFailover(T currentProxy) {
+    toIgnore = successfulProxy.proxyInfo;
+    successfulProxy = null;
+  }
+
+}

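For readers new to the pattern: hedging submits the same call to every
candidate and keeps the first success, which is what invoke() above does. A
stripped-down, self-contained sketch of that core loop (the endpoints and
simulated latencies are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Minimal request-hedging loop: fire all candidates, return the first
    // successful result, then cancel the rest by shutting the pool down.
    public class HedgingDemo {
      public static void main(String[] args) throws Exception {
        List<Callable<String>> candidates = new ArrayList<>();
        candidates.add(() -> { Thread.sleep(500); return "slow-standby"; });
        candidates.add(() -> { Thread.sleep(50);  return "fast-active"; });

        ExecutorService pool = Executors.newFixedThreadPool(candidates.size());
        try {
          CompletionService<String> cs = new ExecutorCompletionService<>(pool);
          for (Callable<String> c : candidates) {
            cs.submit(c);
          }
          int remaining = candidates.size();
          while (remaining > 0) {
            Future<String> f = cs.take();  // first to finish, success or not
            try {
              System.out.println("winner: " + f.get()); // prints fast-active
              return;
            } catch (Exception e) {
              remaining--;                 // that candidate failed; keep waiting
            }
          }
          throw new RuntimeException("all candidates failed");
        } finally {
          pool.shutdownNow();              // interrupts the losing call(s)
        }
      }
    }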
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
index cc53a38..51a88c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -195,9 +195,12 @@ The order in which you set these configurations is unimportant, but the values y
 
     Configure the name of the Java class which will be used by the DFS Client to
     determine which NameNode is the current Active, and therefore which NameNode is
-    currently serving client requests. The only implementation which currently
-    ships with Hadoop is the **ConfiguredFailoverProxyProvider**, so use this
-    unless you are using a custom one. For example:
+    currently serving client requests. The two implementations which currently
+    ship with Hadoop are the **ConfiguredFailoverProxyProvider** and the
+    **RequestHedgingProxyProvider** (which, for the first call, concurrently invokes all
+    namenodes to determine the active one, and on subsequent requests, invokes the active
+    namenode until a fail-over happens), so use one of these unless you are using a custom
+    proxy provider. For example:
 
         <property>
           <name>dfs.client.failover.proxy.provider.mycluster</name>

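To opt a client into the new behaviour, the property shown above would point
at the class added in this patch. A hedged example for a nameservice called
"mycluster" (the nameservice name is illustrative):

    <property>
      <name>dfs.client.failover.proxy.provider.mycluster</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider</value>
    </property>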
http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index d9d9a67..8b42386 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -216,9 +216,13 @@ The order in which you set these configurations is unimportant, but the values y
 
     Configure the name of the Java class which will be used by the DFS Client to
     determine which NameNode is the current Active, and therefore which NameNode is
-    currently serving client requests. The only implementation which currently
-    ships with Hadoop is the **ConfiguredFailoverProxyProvider**, so use this
-    unless you are using a custom one. For example:
+    currently serving client requests. The two implementations which currently
+    ship with Hadoop are the **ConfiguredFailoverProxyProvider** and the
+    **RequestHedgingProxyProvider** (which, for the first call, concurrently invokes all
+    namenodes to determine the active one, and on subsequent requests, invokes the active
+    namenode until a fail-over happens), so use one of these unless you are using a custom
+    proxy provider.
+    For example:
 
         <property>
           <name>dfs.client.failover.proxy.provider.mycluster</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/030fcfa9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
new file mode 100644
index 0000000..32f807a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.ProxyFactory;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.retry.MultiException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.google.common.collect.Lists;
+
+public class TestRequestHedgingProxyProvider {
+
+  private Configuration conf;
+  private URI nnUri;
+  private String ns;
+
+  @Before
+  public void setup() throws URISyntaxException {
+    ns = "mycluster-" + Time.monotonicNow();
+    nnUri = new URI("hdfs://" + ns);
+    conf = new Configuration();
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, ns);
+    conf.set(
+        DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
+    conf.set(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
+        "machine1.foo.bar:8020");
+    conf.set(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
+        "machine2.foo.bar:8020");
+  }
+
+  @Test
+  public void testHedgingWhenOneFails() throws Exception {
+    final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(goodMock.getStats()).thenReturn(new long[] {1});
+    final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
+
+    RequestHedgingProxyProvider<NamenodeProtocols> provider =
+        new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+            createFactory(goodMock, badMock));
+    long[] stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Mockito.verify(badMock).getStats();
+    Mockito.verify(goodMock).getStats();
+  }
+
+  @Test
+  public void testHedgingWhenOneIsSlow() throws Exception {
+    final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        Thread.sleep(1000);
+        return new long[]{1};
+      }
+    });
+    final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
+
+    RequestHedgingProxyProvider<NamenodeProtocols> provider =
+        new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+            createFactory(goodMock, badMock));
+    long[] stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(1, stats[0]);
+    Mockito.verify(badMock).getStats();
+    Mockito.verify(goodMock).getStats();
+  }
+
+  @Test
+  public void testHedgingWhenBothFail() throws Exception {
+    NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
+    NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(worseMock.getStats()).thenThrow(
+            new IOException("Worse mock !!"));
+
+    RequestHedgingProxyProvider<NamenodeProtocols> provider =
+        new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+            createFactory(badMock, worseMock));
+    try {
+      provider.getProxy().proxy.getStats();
+      Assert.fail("Should fail since both namenodes throw IOException !!");
+    } catch (Exception e) {
+      Assert.assertTrue(e instanceof MultiException);
+    }
+    Mockito.verify(badMock).getStats();
+    Mockito.verify(worseMock).getStats();
+  }
+
+  @Test
+  public void testPerformFailover() throws Exception {
+    final AtomicInteger counter = new AtomicInteger(0);
+    final int[] isGood = {1};
+    final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        counter.incrementAndGet();
+        if (isGood[0] == 1) {
+          Thread.sleep(1000);
+          return new long[]{1};
+        }
+        throw new IOException("Was Good mock !!");
+      }
+    });
+    final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        counter.incrementAndGet();
+        if (isGood[0] == 2) {
+          Thread.sleep(1000);
+          return new long[]{2};
+        }
+        throw new IOException("Bad mock !!");
+      }
+    });
+    RequestHedgingProxyProvider<NamenodeProtocols> provider =
+            new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+                    createFactory(goodMock, badMock));
+    long[] stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(1, stats[0]);
+    Assert.assertEquals(2, counter.get());
+    Mockito.verify(badMock).getStats();
+    Mockito.verify(goodMock).getStats();
+
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(1, stats[0]);
+    // Ensure only the previous successful one is invoked
+    Mockito.verifyNoMoreInteractions(badMock);
+    Assert.assertEquals(3, counter.get());
+
+    // Flip to standby, so now this should fail
+    isGood[0] = 2;
+    try {
+      provider.getProxy().proxy.getStats();
+      Assert.fail("Should fail since previously successful proxy now fails ");
+    } catch (Exception ex) {
+      Assert.assertTrue(ex instanceof IOException);
+    }
+
+    Assert.assertEquals(4, counter.get());
+
+    provider.performFailover(provider.getProxy().proxy);
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(2, stats[0]);
+
+    // Counter should update only once
+    Assert.assertEquals(5, counter.get());
+
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(2, stats[0]);
+
+    // Counter updates only once now
+    Assert.assertEquals(6, counter.get());
+
+    // Flip back to old active, so now this should fail
+    isGood[0] = 1;
+    try {
+      provider.getProxy().proxy.getStats();
+      Assert.fail("Should fail since previously successful proxy now fails ");
+    } catch (Exception ex) {
+      Assert.assertTrue(ex instanceof IOException);
+    }
+
+    Assert.assertEquals(7, counter.get());
+
+    provider.performFailover(provider.getProxy().proxy);
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    // Ensure correct proxy was called
+    Assert.assertEquals(1, stats[0]);
+  }
+
+  @Test
+  public void testPerformFailoverWith3Proxies() throws Exception {
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
+            "nn1,nn2,nn3");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
+            "machine3.foo.bar:8020");
+
+    final AtomicInteger counter = new AtomicInteger(0);
+    final int[] isGood = {1};
+    final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        counter.incrementAndGet();
+        if (isGood[0] == 1) {
+          Thread.sleep(1000);
+          return new long[]{1};
+        }
+        throw new IOException("Was Good mock !!");
+      }
+    });
+    final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        counter.incrementAndGet();
+        if (isGood[0] == 2) {
+          Thread.sleep(1000);
+          return new long[]{2};
+        }
+        throw new IOException("Bad mock !!");
+      }
+    });
+    final NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
+    Mockito.when(worseMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        counter.incrementAndGet();
+        if (isGood[0] == 3) {
+          Thread.sleep(1000);
+          return new long[]{3};
+        }
+        throw new IOException("Worse mock !!");
+      }
+    });
+
+    RequestHedgingProxyProvider<NamenodeProtocols> provider =
+            new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
+                    createFactory(goodMock, badMock, worseMock));
+    long[] stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(1, stats[0]);
+    Assert.assertEquals(3, counter.get());
+    Mockito.verify(badMock).getStats();
+    Mockito.verify(goodMock).getStats();
+    Mockito.verify(worseMock).getStats();
+
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(1, stats[0]);
+    // Ensure only the previous successful one is invoked
+    Mockito.verifyNoMoreInteractions(badMock);
+    Mockito.verifyNoMoreInteractions(worseMock);
+    Assert.assertEquals(4, counter.get());
+
+    // Flip to standby, so now this should fail
+    isGood[0] = 2;
+    try {
+      provider.getProxy().proxy.getStats();
+      Assert.fail("Should fail since previously successful proxy now fails ");
+    } catch (Exception ex) {
+      Assert.assertTrue(ex instanceof IOException);
+    }
+
+    Assert.assertEquals(5, counter.get());
+
+    provider.performFailover(provider.getProxy().proxy);
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(2, stats[0]);
+
+    // Counter updates twice since both proxies are tried on failure
+    Assert.assertEquals(7, counter.get());
+
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(2, stats[0]);
+
+    // Counter updates only once now
+    Assert.assertEquals(8, counter.get());
+
+    // Flip to other standby, so now this should fail
+    isGood[0] = 3;
+    try {
+      provider.getProxy().proxy.getStats();
+      Assert.fail("Should fail since previously successful proxy now fails ");
+    } catch (Exception ex) {
+      Assert.assertTrue(ex instanceof IOException);
+    }
+
+    // Counter should update only once
+    Assert.assertEquals(9, counter.get());
+
+    provider.performFailover(provider.getProxy().proxy);
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+
+    // Ensure correct proxy was called
+    Assert.assertEquals(3, stats[0]);
+
+    // Counter updates twice since both proxies are tried on failure
+    Assert.assertEquals(11, counter.get());
+
+    stats = provider.getProxy().proxy.getStats();
+    Assert.assertTrue(stats.length == 1);
+    Assert.assertEquals(3, stats[0]);
+
+    // Counter updates only once now
+    Assert.assertEquals(12, counter.get());
+  }
+
+  private ProxyFactory<NamenodeProtocols> createFactory(
+      NamenodeProtocols... protos) {
+    final Iterator<NamenodeProtocols> iterator =
+        Lists.newArrayList(protos).iterator();
+    return new ProxyFactory<NamenodeProtocols>() {
+      @Override
+      public NamenodeProtocols createProxy(Configuration conf,
+          InetSocketAddress nnAddr, Class<NamenodeProtocols> xface,
+          UserGroupInformation ugi, boolean withRetries,
+          AtomicBoolean fallbackToSimpleAuth) throws IOException {
+        return iterator.next();
+      }
+    };
+  }
+}


[4/6] hadoop git commit: YARN-3982. container-executor parsing of container-executor.cfg broken in trunk and branch-2. Contributed by Varun Vasudev

Posted by ae...@apache.org.
YARN-3982. container-executor parsing of container-executor.cfg broken
in trunk and branch-2. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1709342
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1709342
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1709342

Branch: refs/heads/HDFS-7240
Commit: f17093421521efcbdc813f6f2b8411e45ecc7863
Parents: 030fcfa
Author: Xuan <xg...@apache.org>
Authored: Mon Jul 27 23:45:58 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 27 23:45:58 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../container-executor/impl/configuration.c     |  4 ++--
 .../test/test-container-executor.c              | 22 +++++++++++++-------
 3 files changed, 20 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4f8484a..b4666e8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -698,6 +698,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3846. RM Web UI queue filter is not working for sub queue.
     (Mohammad Shahid Khan via jianhe)
 
+    YARN-3982. container-executor parsing of container-executor.cfg broken in
+    trunk and branch-2. (Varun Vasudev via xgong)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 2825367..373dbfd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -284,11 +284,11 @@ char * get_value(const char* key) {
 
 /**
  * Function to return an array of values for a key.
- * Value delimiter is assumed to be a '%'.
+ * Value delimiter is assumed to be a ','.
  */
 char ** get_values(const char * key) {
   char *value = get_value(key);
-  return extract_values(value);
+  return extract_values_delim(value, ",");
 }
 
 char ** extract_values_delim(char *value, const char *delim) {

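Concretely, after this change a multi-valued key in container-executor.cfg is
comma-separated rather than '%'-separated. A hedged illustration (the key
names follow the usual container-executor.cfg examples; the values are
illustrative):

    # get_values("banned.users") now returns {"hdfs", "yarn", "bin"}
    banned.users=hdfs,yarn,bin
    allowed.system.users=nobody,daemon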
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1709342/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 99bcf34..001a37d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -145,7 +145,7 @@ void check_pid_file(const char* pid_file, pid_t mypid) {
   }
 
   char myPidBuf[33];
-  snprintf(myPidBuf, 33, "%" PRId64, (int64_t)mypid);
+  snprintf(myPidBuf, 33, "%" PRId64, (int64_t)(mypid + 1));
   if (strncmp(pidBuf, myPidBuf, strlen(myPidBuf)) != 0) {
     printf("FAIL: failed to find matching pid in pid file\n");
     printf("FAIL: Expected pid %" PRId64 " : Got %.*s", (int64_t)mypid,
@@ -212,15 +212,15 @@ void test_get_app_log_dir() {
   free(logdir);
 }
 
-void test_check_user() {
+void test_check_user(int expectedFailure) {
   printf("\nTesting test_check_user\n");
   struct passwd *user = check_user(username);
-  if (user == NULL) {
+  if (user == NULL && !expectedFailure) {
     printf("FAIL: failed check for user %s\n", username);
     exit(1);
   }
   free(user);
-  if (check_user("lp") != NULL) {
+  if (check_user("lp") != NULL && !expectedFailure) {
     printf("FAIL: failed check for system user lp\n");
     exit(1);
   }
@@ -228,7 +228,7 @@ void test_check_user() {
     printf("FAIL: failed check for system user root\n");
     exit(1);
   }
-  if (check_user("daemon") == NULL) {
+  if (check_user("daemon") == NULL && !expectedFailure) {
     printf("FAIL: failed check for whitelisted system user daemon\n");
     exit(1);
   }
@@ -467,6 +467,7 @@ void test_signal_container() {
     printf("FAIL: fork failed\n");
     exit(1);
   } else if (child == 0) {
+    printf("\nSwitching to user %d\n", user_detail->pw_uid);
     if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
       exit(1);
     }
@@ -474,6 +475,10 @@ void test_signal_container() {
     exit(0);
   } else {
     printf("Child container launched as %" PRId64 "\n", (int64_t)child);
+    printf("Signaling container as user %s\n", yarn_username);
+    // there's a race condition between the child calling change_user and us
+    // calling signal_container_as_user, hence the sleep
+    sleep(3);
     if (signal_container_as_user(yarn_username, child, SIGQUIT) != 0) {
       exit(1);
     }
@@ -805,7 +810,7 @@ int main(int argc, char **argv) {
   printf("\nTesting delete_app()\n");
   test_delete_app();
 
-  test_check_user();
+  test_check_user(0);
 
   // the tests that change user need to be run in a subshell, so that
   // when they change user they don't give up our privs
@@ -832,7 +837,10 @@ int main(int argc, char **argv) {
 
   read_config(TEST_ROOT "/test.cfg");
   username = "bin";
-  test_check_user();
+  test_check_user(1);
+
+  username = "sys";
+  test_check_user(1);
 
   run("rm -fr " TEST_ROOT);
   printf("\nFinished tests\n");


[5/6] hadoop git commit: Merge branch 'trunk' into HDFS-7240

Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/188d283a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/188d283a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/188d283a

Branch: refs/heads/HDFS-7240
Commit: 188d283a4debe2151ee3327ac23bad86b9b6aba3
Parents: 2ebe8c7 f170934
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jul 28 11:12:08 2015 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Jul 28 11:12:08 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/io/retry/MultiException.java  |  49 +++
 .../hadoop/io/retry/RetryInvocationHandler.java |  99 +++++-
 .../src/site/markdown/FileSystemShell.md        |   2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../ha/ConfiguredFailoverProxyProvider.java     |  52 ++-
 .../ha/RequestHedgingProxyProvider.java         | 186 ++++++++++
 .../markdown/HDFSHighAvailabilityWithNFS.md     |   9 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md     |  10 +-
 .../ha/TestRequestHedgingProxyProvider.java     | 350 +++++++++++++++++++
 hadoop-yarn-project/CHANGES.txt                 |   6 +
 .../container-executor/impl/configuration.c     |   4 +-
 .../test/test-container-executor.c              |  22 +-
 .../webapp/CapacitySchedulerPage.java           |   5 +-
 14 files changed, 755 insertions(+), 44 deletions(-)
----------------------------------------------------------------------



[6/6] hadoop git commit: HDFS-8695. OzoneHandler : Add Bucket REST Interface. (aengineer)

Posted by ae...@apache.org.
HDFS-8695. OzoneHandler : Add Bucket REST Interface. (aengineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7851874
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7851874
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7851874

Branch: refs/heads/HDFS-7240
Commit: c78518749e431684cfc7685e1cf3299751b130e6
Parents: 188d283
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jul 28 11:13:13 2015 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Jul 28 11:13:13 2015 -0700

----------------------------------------------------------------------
 .../hadoop/ozone/web/exceptions/ErrorTable.java |   5 +
 .../hadoop/ozone/web/handlers/BucketArgs.java   |  10 -
 .../ozone/web/handlers/BucketHandler.java       | 193 +++++++++++++
 .../web/handlers/BucketProcessTemplate.java     | 278 +++++++++++++++++++
 .../apache/hadoop/ozone/web/headers/Header.java |  14 +-
 .../hadoop/ozone/web/interfaces/Bucket.java     | 133 +++++++++
 .../ozone/web/interfaces/StorageHandler.java    |  85 ++++++
 .../web/localstorage/LocalStorageHandler.java   | 109 ++++++++
 8 files changed, 815 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
index a51dac5..7e75cf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
@@ -58,6 +58,7 @@ public final class ErrorTable {
       new OzoneException(HTTP_BAD_REQUEST, "malformedACL",
                          "Invalid ACL specified.");
 
+
   public static final OzoneException INVALID_VOLUME_NAME =
       new OzoneException(HTTP_BAD_REQUEST, "invalidVolumeName",
                          "Invalid volume name.");
@@ -81,6 +82,10 @@ public final class ErrorTable {
       new OzoneException(HTTP_BAD_REQUEST, "malformedBucketVersion",
                          "Malformed bucket version or version not unique.");
 
+  public static final OzoneException MALFORMED_STORAGE_TYPE =
+      new OzoneException(HTTP_BAD_REQUEST, "malformedStorageType",
+                         "Invalid storage Type specified.");
+
   public static final OzoneException MALFORMED_STORAGE_CLASS =
       new OzoneException(HTTP_BAD_REQUEST, "malformedStorageClass",
                          "Invalid storage class specified.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
index 315ae3f..d62c72d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
@@ -164,16 +164,6 @@ public class BucketArgs extends VolumeArgs {
     return versioning;
   }
 
-  /**
-   * Converts a valid String to Enum for ease of use.
-   *
-   * @param version version string.
-   */
-  public void setVersioning(String version) {
-    if (version != null) {
-      this.versioning = OzoneConsts.Versioning.valueOf(version.toUpperCase());
-    }
-  }
 
   /**
    * SetVersioning Info.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
new file mode 100644
index 0000000..2005367
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
@@ -0,0 +1,193 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.web.handlers;
+
+import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.headers.Header;
+import org.apache.hadoop.ozone.web.interfaces.Bucket;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Request;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+
+import static java.net.HttpURLConnection.HTTP_CREATED;
+import static java.net.HttpURLConnection.HTTP_OK;
+
+
+/**
+ * Bucket Class handles all ozone Bucket related actions.
+ */
+public class BucketHandler implements Bucket {
+  /**
+   * createBucket call handles the POST request for Creating a Bucket.
+   *
+   * @param volume - Volume name
+   * @param bucket - Bucket Name
+   * @param req - Http request
+   * @param info - Uri Info
+   * @param headers - Http headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @Override
+  public Response createBucket(String volume, String bucket, Request req,
+                               UriInfo info, HttpHeaders headers)
+      throws OzoneException {
+    return new BucketProcessTemplate() {
+      @Override
+      public Response doProcess(BucketArgs args)
+          throws OzoneException, IOException {
+        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+        getAclsFromHeaders(args, false);
+        args.setVersioning(getVersioning(args));
+        args.setStorageType(getStorageType(args));
+        fs.createBucket(args);
+        return OzoneUtils.getResponse(args, HTTP_CREATED, "");
+      }
+    }.handleCall(volume, bucket, req, info, headers);
+  }
+
+  /**
+   * updateBucket call handles the PUT request for updating a Bucket.
+   *
+   * There are currently only three possible actions with updateBucket:
+   * add/remove of ACLs, bucket versioning, and storage type.
+   * A call with any other action just returns 200 OK.
+   *
+   * @param volume - Storage volume name
+   * @param bucket - Bucket name
+   * @param req - Http request
+   * @param info - Uri Info
+   * @param headers - Http headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @Override
+  public Response updateBucket(String volume, String bucket, Request req,
+                               UriInfo info, HttpHeaders headers)
+      throws OzoneException {
+    return new BucketProcessTemplate() {
+      @Override
+      public Response doProcess(BucketArgs args)
+          throws OzoneException, IOException {
+        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+        getAclsFromHeaders(args, true);
+        args.setVersioning(getVersioning(args));
+        args.setStorageType(getStorageType(args));
+
+        if ((args.getAddAcls() != null) || (args.getRemoveAcls() != null)) {
+          fs.setBucketAcls(args);
+        }
+
+        if (args.getVersioning() != OzoneConsts.Versioning.NOT_DEFINED) {
+          fs.setBucketVersioning(args);
+        }
+
+        if (args.getStorageType() != null) {
+          fs.setBucketStorageClass(args);
+        }
+        return OzoneUtils.getResponse(args, HTTP_OK, "");
+      }
+    }.handleCall(volume, bucket, req, info, headers);
+  }
+
+  /**
+   * Deletes an empty bucket.
+   *
+   * @param volume Volume name
+   * @param bucket Bucket Name
+   * @param req - Http request
+   * @param info - Uri Info
+   * @param headers - Http headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @Override
+  public Response deleteBucket(String volume, String bucket, Request req,
+                               UriInfo info, HttpHeaders headers)
+      throws OzoneException {
+    return new BucketProcessTemplate() {
+      @Override
+      public Response doProcess(BucketArgs args)
+          throws OzoneException, IOException {
+        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+        fs.deleteBucket(args);
+        return OzoneUtils.getResponse(args, HTTP_OK, "");
+      }
+    }.handleCall(volume, bucket, req, info, headers);
+  }
+
+  /**
+   * listBucket lists the contents of a bucket.
+   *
+   * @param volume - Storage Volume Name
+   * @param bucket - Bucket Name
+   * @param info - Information type needed
+   * @param prefix - Prefix for the keys to be fetched
+   * @param maxKeys - MaxNumber of Keys to Return
+   * @param startPage - Continuation Token
+   * @param req - Http request
+   * @param headers - Http headers
+   *
+   * @return - Json Body
+   *
+   * @throws OzoneException
+   */
+  @Override
+  public Response listBucket(String volume, String bucket, final String info,
+                             final String prefix, final int maxKeys,
+                             final String startPage, Request req,
+                             UriInfo uriInfo, HttpHeaders headers)
+      throws OzoneException {
+    return new BucketProcessTemplate() {
+      @Override
+      public Response doProcess(BucketArgs args)
+          throws OzoneException, IOException {
+        switch (info) {
+          //TODO : Enable when Object support is enabled.
+          //          case Header.OZONE_LIST_QUERY_KEY:
+          //            ListArgs listArgs = new ListArgs(args, prefix,
+          // maxKeys, startPage);
+          //            return getBucketKeysList(listArgs);
+          case Header.OZONE_LIST_QUERY_BUCKET:
+            return getBucketInfoResponse(args);
+          default:
+            OzoneException ozException =
+                ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
+            ozException.setMessage("Unrecognized query param : " + info);
+            throw ozException;
+        }
+      }
+    }.handleCall(volume, bucket, req, uriInfo, headers);
+  }
+}
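
For orientation, a minimal client sketch against the handler above. The
host/port, the ENABLED versioning value and the ACL payload after the ADD
tag are all assumptions; only the header names (see Header.java below) and
the 201 status come from this patch.

  import java.io.IOException;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class CreateBucketClient {
    public static void main(String[] args) throws IOException {
      // Endpoint location is an assumption; the patch does not wire a server.
      URL url = new URL("http://localhost:9864/vol1/bucket1");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("POST");
      // Header names match Header.java; the ACL grammar is hypothetical.
      conn.addRequestProperty("x-ozone-acls", "ADD user:bin:rw");
      conn.setRequestProperty("x-ozone-bucket-versioning", "ENABLED");
      conn.setRequestProperty("x-ozone-storage-type", "DISK");
      System.out.println("HTTP " + conn.getResponseCode()); // 201 on success
      conn.disconnect();
    }
  }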

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
new file mode 100644
index 0000000..73827db
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.handlers;
+
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.headers.Header;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.interfaces.UserAuth;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Request;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.nio.file.DirectoryNotEmptyException;
+import java.nio.file.FileAlreadyExistsException;
+import java.nio.file.NoSuchFileException;
+import java.util.LinkedList;
+import java.util.List;
+
+import static java.net.HttpURLConnection.HTTP_OK;
+
+
+/**
+ * This class abstracts away the repetitive tasks in
+ * bucket handling code.
+ */
+public abstract class BucketProcessTemplate {
+
+  /**
+   * This function serves as the common error handling function
+   * for all bucket related operations.
+   *
+   * @param volume - Volume Name
+   * @param bucket - Bucket Name
+   * @param request - Http Request
+   * @param uriInfo - Http Uri
+   * @param headers - Http Headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  public Response handleCall(String volume, String bucket, Request request,
+                             UriInfo uriInfo, HttpHeaders headers)
+      throws OzoneException {
+    // TODO : Add logging
+    String reqID = OzoneUtils.getRequestID();
+    String hostName = OzoneUtils.getHostName();
+    try {
+      OzoneUtils.validate(request, headers, reqID, bucket, hostName);
+      OzoneUtils.verifyBucketName(bucket);
+
+      UserAuth auth = UserHandlerBuilder.getAuthHandler();
+      UserArgs userArgs =
+          new UserArgs(reqID, hostName, request, uriInfo, headers);
+      userArgs.setUserName(auth.getUser(userArgs));
+
+      BucketArgs args = new BucketArgs(volume, bucket, userArgs);
+      return doProcess(args);
+    } catch (IllegalArgumentException argExp) {
+      OzoneException ex = ErrorTable
+          .newError(ErrorTable.INVALID_BUCKET_NAME, reqID, bucket, hostName);
+      ex.setMessage(argExp.getMessage());
+      throw ex;
+    } catch (IOException fsExp) {
+      handleIOException(bucket, reqID, hostName, fsExp);
+    }
+    return null;
+  }
+
+  /**
+   * Reads ACLs from headers and throws appropriate exception if needed.
+   *
+   * @param args - bucketArgs
+   *
+   * @throws OzoneException
+   */
+  void getAclsFromHeaders(BucketArgs args, boolean parseRemoveACL)
+      throws OzoneException {
+    try {
+      List<String> acls = getAcls(args, Header.OZONE_ACL_REMOVE);
+      if (acls != null && !acls.isEmpty()) {
+        args.removeAcls(acls);
+      }
+      if ((!parseRemoveACL) && args.getRemoveAcls() != null) {
+        OzoneException ex = ErrorTable.newError(ErrorTable.MALFORMED_ACL, args);
+        ex.setMessage("Invalid Remove ACLs");
+        throw ex;
+      }
+
+      acls = getAcls(args, Header.OZONE_ACL_ADD);
+      if (acls != null && !acls.isEmpty()) {
+        args.addAcls(acls);
+      }
+    } catch (IllegalArgumentException ex) {
+      throw ErrorTable.newError(ErrorTable.MALFORMED_ACL, args, ex);
+    }
+  }
+
+  /**
+   * Converts FileSystem IO exceptions to OZONE exceptions.
+   *
+   * @param bucket Name of the bucket
+   * @param reqID Request ID
+   * @param hostName Machine Name
+   * @param fsExp Exception
+   *
+   * @throws OzoneException
+   */
+  void handleIOException(String bucket, String reqID, String hostName,
+                         IOException fsExp) throws OzoneException {
+
+    if (fsExp instanceof FileAlreadyExistsException) {
+      throw ErrorTable
+          .newError(ErrorTable.BUCKET_ALREADY_EXISTS, reqID, bucket, hostName);
+    }
+
+    if (fsExp instanceof DirectoryNotEmptyException) {
+      throw ErrorTable
+          .newError(ErrorTable.BUCKET_NOT_EMPTY, reqID, bucket, hostName);
+    }
+
+    if (fsExp instanceof NoSuchFileException) {
+      throw ErrorTable
+          .newError(ErrorTable.INVALID_BUCKET_NAME, reqID, bucket, hostName);
+    }
+
+    // by default we don't handle this exception yet.
+
+    throw ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, bucket, hostName);
+  }
+
+  /**
+   * Abstract function that gets implemented in the BucketHandler functions.
+   * This function deals only with the core file system related logic
+   * and relies on the handleCall function for the repetitive error checks.
+   *
+   * @param args - parsed bucket args, name, userName, ACLs etc
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   * @throws IOException
+   */
+  public abstract Response doProcess(BucketArgs args)
+      throws OzoneException, IOException;
+
+
+  /**
+   * Returns the ACL String if available.
+   * This function ignores all ACL entries that are not prefixed with
+   * either ADD or REMOVE.
+   *
+   * @param args - BucketArgs
+   * @param tag - Tag for different type of acls
+   *
+   * @return List of ACLs
+   *
+   * @throws OzoneException
+   */
+  List<String> getAcls(BucketArgs args, String tag) throws OzoneException {
+    List<String> aclStrings =
+        args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
+    List<String> filteredSet = null;
+    if (aclStrings != null) {
+      filteredSet = new LinkedList<>();
+      for (String s : aclStrings) {
+        if (s.startsWith(tag)) {
+          filteredSet.add(s.replaceFirst(tag, ""));
+        }
+      }
+    }
+    return filteredSet;
+  }
+
+  /**
+   * Returns bucket versioning Info.
+   *
+   * @param args - BucketArgs
+   *
+   * @return - OzoneConsts.Versioning, or null if no header is present
+   *
+   * @throws OzoneException
+   */
+  OzoneConsts.Versioning getVersioning(BucketArgs args) throws OzoneException {
+
+    List<String> versionStrings =
+        args.getHeaders().getRequestHeader(Header.OZONE_BUCKET_VERSIONING);
+    if (versionStrings == null) {
+      return null;
+    }
+
+    if (versionStrings.size() > 1) {
+      OzoneException ex =
+          ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args);
+      ex.setMessage("Exactly one bucket version header required");
+      throw ex;
+    }
+
+    String version = versionStrings.get(0);
+    try {
+      return OzoneConsts.Versioning.valueOf(version);
+    } catch (IllegalArgumentException ex) {
+      throw ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args, ex);
+    }
+  }
+
+
+  /**
+   * Returns the storage type if the header is present, else null.
+   *
+   * @param args - bucketArgs
+   *
+   * @return StorageType
+   *
+   * @throws OzoneException
+   */
+  StorageType getStorageType(BucketArgs args) throws OzoneException {
+
+    try {
+      List<String> storageClassString =
+          args.getHeaders().getRequestHeader(Header.OZONE_STORAGE_CLASS);
+      if (storageClassString == null) {
+        return null;
+      }
+      if (storageClassString.size() > 1) {
+        OzoneException ex =
+            ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args);
+        ex.setMessage("Exactly one storage class header required");
+        throw ex;
+      }
+      return StorageType.valueOf(storageClassString.get(0).toUpperCase());
+    } catch (IllegalArgumentException ex) {
+      throw ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args, ex);
+    }
+  }
+
+  /**
+   * Returns BucketInfo response.
+   *
+   * @param args - BucketArgs
+   *
+   * @return BucketInfo
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  Response getBucketInfoResponse(BucketArgs args)
+      throws IOException, OzoneException {
+    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
+    BucketInfo info = fs.getBucketInfo(args);
+    return OzoneUtils.getResponse(args, HTTP_OK, info.toJsonString());
+  }
+
+}
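
A minimal sketch of how another verb could reuse the template above: only
doProcess changes, while handleCall supplies the validation, auth and error
mapping. The checkAccess method itself is hypothetical; checkBucketAccess,
StorageHandlerBuilder and OzoneUtils.getResponse are from this patch.

  // Hypothetical handler method, assuming the same imports as BucketHandler
  // (including HTTP_OK from java.net.HttpURLConnection).
  public Response checkAccess(String volume, String bucket, Request req,
                              UriInfo info, HttpHeaders headers)
      throws OzoneException {
    return new BucketProcessTemplate() {
      @Override
      public Response doProcess(BucketArgs args)
          throws OzoneException, IOException {
        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
        fs.checkBucketAccess(args); // throws if absent or unreadable
        return OzoneUtils.getResponse(args, HTTP_OK, "");
      }
    }.handleCall(volume, bucket, req, info, headers);
  }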

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/headers/Header.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/headers/Header.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/headers/Header.java
index 6400b44..a804235 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/headers/Header.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/headers/Header.java
@@ -38,12 +38,22 @@ public final class Header {
 
   public static final String OZONE_LIST_QUERY_SERVICE = "service";
   public static final String OZONE_LIST_QUERY_VOLUME = "volume";
-  public static final String OZONE_LIST_QUERY_BUCKET ="bucket";
-  public static final String OZONE_LIST_QUERY_KEY ="key";
+  public static final String OZONE_LIST_QUERY_BUCKET = "bucket";
+  public static final String OZONE_LIST_QUERY_KEY = "key";
 
   public static final String OZONE_REQUEST_ID = "x-ozone-request-id";
   public static final String OZONE_SERVER_NAME = "x-ozone-server-name";
 
+  public static final String OZONE_STORAGE_CLASS = "x-ozone-storage-type";
+
+  public static final String OZONE_BUCKET_VERSIONING =
+      "x-ozone-bucket-versioning";
+
+  public static final String OZONE_ACLS = "x-ozone-acls";
+  public static final String OZONE_ACL_ADD = "ADD";
+  public static final String OZONE_ACL_REMOVE = "REMOVE";
+
+
   private Header() {
     // Never constructed.
   }
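
The ADD/REMOVE tags above are matched by prefix in getAcls (startsWith plus
replaceFirst), so one x-ozone-acls header can carry both kinds of entry. A
standalone sketch of that filtering; the ACL payload after the tag is an
assumption, and note that the separating space survives replaceFirst:

  import java.util.Arrays;
  import java.util.LinkedList;
  import java.util.List;

  public class AclFilterDemo {
    // Mirrors the getAcls loop in BucketProcessTemplate.
    static List<String> filterByTag(List<String> aclStrings, String tag) {
      List<String> filtered = new LinkedList<>();
      for (String s : aclStrings) {
        if (s.startsWith(tag)) {
          filtered.add(s.replaceFirst(tag, ""));
        }
      }
      return filtered;
    }

    public static void main(String[] args) {
      List<String> values = Arrays.asList("ADD user:bin:rw", "REMOVE user:lp:rw");
      System.out.println(filterByTag(values, "ADD"));    // [ user:bin:rw]
      System.out.println(filterByTag(values, "REMOVE")); // [ user:lp:rw]
    }
  }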

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java
new file mode 100644
index 0000000..36141e3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/Bucket.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.interfaces;
+
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.headers.Header;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Request;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+
+/**
+ * Bucket Interface acts as the HTTP entry point for
+ * bucket related functionality.
+ */
+@Path("/{volume}/{bucket}")
+public interface Bucket {
+  /**
+   * createBucket call handles the POST request for Creating a Bucket.
+   *
+   * @param volume - Volume name
+   * @param bucket - Bucket Name
+   * @param req - Http request
+   * @param info - Uri Info
+   * @param headers - Http headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @POST
+  Response createBucket(@PathParam("volume") String volume,
+                        @PathParam("bucket") String bucket,
+                        @Context Request req, @Context UriInfo info,
+                        @Context HttpHeaders headers) throws OzoneException;
+
+  /**
+   * updateBucket call handles the PUT request for updating a Bucket.
+   *
+   * @param volume - Volume name
+   * @param bucket - Bucket name
+   * @param req - Http request
+   * @param info - Uri Info
+   * @param headers - Http headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @PUT
+  Response updateBucket(@PathParam("volume") String volume,
+                        @PathParam("bucket") String bucket,
+                        @Context Request req, @Context UriInfo info,
+                        @Context HttpHeaders headers) throws OzoneException;
+
+  /**
+   * Deletes an empty bucket.
+   *
+   * @param volume Volume name
+   * @param bucket Bucket Name
+   * @param req - Http request
+   * @param info - Uri Info
+   * @param headers - Http headers
+   *
+   * @return Response
+   *
+   * @throws OzoneException
+   */
+  @DELETE
+  Response deleteBucket(@PathParam("volume") String volume,
+                        @PathParam("bucket") String bucket,
+                        @Context Request req, @Context UriInfo info,
+                        @Context HttpHeaders headers) throws OzoneException;
+
+  /**
+   * List Buckets lists the contents of a bucket.
+   *
+   * @param volume - Storage Volume Name
+   * @param bucket - Bucket Name
+   * @param info - Information type needed
+   * @param prefix - Prefix for the keys to be fetched
+   * @param maxKeys - MaxNumber of Keys to Return
+   * @param startPage - Continuation Token
+   * @param req - Http request
+   * @param headers - Http headers
+   *
+   * @return - Json Body
+   *
+   * @throws OzoneException
+   */
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  Response listBucket(@PathParam("volume") String volume,
+                      @PathParam("bucket") String bucket,
+                      @DefaultValue(Header.OZONE_LIST_QUERY_KEY)
+                      @QueryParam("info") String info,
+                      @QueryParam("prefix") String prefix,
+                      @DefaultValue("1000") @QueryParam("max-keys") int maxKeys,
+                      @QueryParam("start-page") String startPage,
+                      @Context Request req, @Context UriInfo uriInfo,
+                      @Context HttpHeaders headers) throws OzoneException;
+
+
+}
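
Given these JAX-RS annotations, the bucket URL space is POST/PUT/DELETE/GET
on /{volume}/{bucket}, with the GET variant selected by the info query param
(default "key", per OZONE_LIST_QUERY_KEY). A sketch of fetching bucket
metadata; the host/port is an assumption:

  import java.io.IOException;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class BucketInfoClient {
    public static void main(String[] args) throws IOException {
      // ?info=bucket matches Header.OZONE_LIST_QUERY_BUCKET and routes to
      // getBucketInfoResponse in BucketProcessTemplate.
      URL url = new URL("http://localhost:9864/vol1/bucket1?info=bucket");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("GET");
      System.out.println("HTTP " + conn.getResponseCode()); // 200, JSON body
      conn.disconnect();
    }
  }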

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
index 043b59e..6759888 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.ozone.web.interfaces;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.response.ListBuckets;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
 import org.apache.hadoop.ozone.web.response.VolumeInfo;
 
@@ -121,4 +124,86 @@ public interface StorageHandler {
    * @throws OzoneException
    */
   VolumeInfo getVolumeInfo(VolumeArgs args) throws IOException, OzoneException;
+
+  /**
+   * Creates a Bucket in specified Volume.
+   *
+   * @param args BucketArgs - bucket name, user name and ACLs
+   *
+   * @throws IOException
+   */
+  void createBucket(BucketArgs args) throws IOException, OzoneException;
+
+  /**
+   * Adds or Removes ACLs from a Bucket.
+   *
+   * @param args - BucketArgs
+   *
+   * @throws IOException
+   */
+  void setBucketAcls(BucketArgs args) throws IOException, OzoneException;
+
+  /**
+   * Enables or disables Bucket Versioning.
+   *
+   * @param args - BucketArgs
+   *
+   * @throws IOException
+   */
+  void setBucketVersioning(BucketArgs args) throws IOException, OzoneException;
+
+  /**
+   * Sets the Storage Class of a Bucket.
+   *
+   * @param args - BucketArgs
+   *
+   * @throws IOException
+   */
+  void setBucketStorageClass(BucketArgs args)
+      throws IOException, OzoneException;
+
+  /**
+   * Deletes a bucket if it is empty.
+   *
+   * @param args Bucket args structure
+   *
+   * @throws IOException
+   */
+  void deleteBucket(BucketArgs args) throws IOException, OzoneException;
+
+  /**
+   * Succeeds if the bucket exists and the user has read access
+   * to it; throws an exception otherwise.
+   *
+   * @param args Bucket args structure
+   *
+   * @throws IOException
+   */
+  void checkBucketAccess(BucketArgs args) throws IOException, OzoneException;
+
+
+  /**
+   * Returns all Buckets of a specified Volume.
+   *
+   * @param args - VolumeArgs
+   *
+   * @return ListBuckets
+   *
+   * @throws OzoneException
+   */
+  ListBuckets listBuckets(VolumeArgs args) throws IOException, OzoneException;
+
+
+  /**
+   * Returns Bucket's Metadata as a String.
+   *
+   * @param args Bucket args structure
+   *
+   * @return Info about the bucket
+   *
+   * @throws IOException
+   */
+  BucketInfo getBucketInfo(BucketArgs args) throws IOException, OzoneException;
+
+
 }
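
The contract above is what BucketProcessTemplate programs against. A smoke
sketch of the bucket lifecycle through it, assuming a concrete handler is
wired up via StorageHandlerBuilder (referenced by this patch but not shown)
and that a UserArgs instance is already in scope:

  // Hypothetical round trip; every call below is declared by StorageHandler.
  static void roundTrip(UserArgs userArgs) throws IOException, OzoneException {
    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
    BucketArgs args = new BucketArgs("vol1", "bucket1", userArgs);
    fs.createBucket(args);
    fs.checkBucketAccess(args);           // throws if not readable
    BucketInfo info = fs.getBucketInfo(args);
    System.out.println(info.toJsonString());
    fs.deleteBucket(args);                // only succeeds while empty
  }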

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7851874/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java
index 939ed1e..e00a66f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java
@@ -20,10 +20,13 @@ package org.apache.hadoop.ozone.web.localstorage;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.response.ListBuckets;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
 import org.apache.hadoop.ozone.web.response.VolumeInfo;
 
@@ -158,4 +161,110 @@ public class LocalStorageHandler implements StorageHandler {
     return oz.listVolumes(args);
   }
 
+  /**
+   * Succeeds if the bucket exists and the user has read access
+   * to it; throws an exception otherwise.
+   *
+   * @param args Bucket args structure
+   *
+   * @throws IOException
+   */
+  @Override
+  public void checkBucketAccess(BucketArgs args)
+      throws IOException, OzoneException {
+
+  }
+
+  /**
+   * Creates a Bucket in specified Volume.
+   *
+   * @param args BucketArgs - bucket name, user name and ACLs
+   *
+   * @throws IOException
+   */
+  @Override
+  public void createBucket(BucketArgs args) throws IOException, OzoneException {
+
+  }
+
+  /**
+   * Adds or Removes ACLs from a Bucket.
+   *
+   * @param args - BucketArgs
+   *
+   * @throws IOException
+   */
+  @Override
+  public void setBucketAcls(BucketArgs args)
+      throws IOException, OzoneException {
+
+  }
+
+  /**
+   * Enables or disables Bucket Versioning.
+   *
+   * @param args - BucketArgs
+   *
+   * @throws IOException
+   */
+  @Override
+  public void setBucketVersioning(BucketArgs args)
+      throws IOException, OzoneException {
+
+  }
+
+  /**
+   * Sets the Storage Class of a Bucket.
+   *
+   * @param args - BucketArgs
+   *
+   * @throws IOException
+   */
+  @Override
+  public void setBucketStorageClass(BucketArgs args)
+      throws IOException, OzoneException {
+
+  }
+
+  /**
+   * Deletes a bucket if it is empty.
+   *
+   * @param args Bucket args structure
+   *
+   * @throws IOException
+   */
+  @Override
+  public void deleteBucket(BucketArgs args) throws IOException, OzoneException {
+
+  }
+
+  /**
+   * Returns all Buckets of a specified Volume.
+   *
+   * @param args - VolumeArgs
+   *
+   * @return ListBuckets
+   *
+   * @throws OzoneException
+   */
+  @Override
+  public ListBuckets listBuckets(VolumeArgs args)
+      throws IOException, OzoneException {
+    return null;
+  }
+
+  /**
+   * Returns Bucket's Metadata as a String.
+   *
+   * @param args Bucket args structure
+   *
+   * @return Info about the bucket
+   *
+   * @throws IOException
+   */
+  @Override
+  public BucketInfo getBucketInfo(BucketArgs args)
+      throws IOException, OzoneException {
+    return null;
+  }
 }