Posted to common-commits@hadoop.apache.org by xg...@apache.org on 2017/06/05 21:04:10 UTC

[01/50] [abbrv] hadoop git commit: YARN-6555. Store application flow context in NM state store for work-preserving restart. (Rohith Sharma K S via Haibo Chen) [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 b361c5be8 -> ac788fd47 (forced update)


YARN-6555. Store application flow context in NM state store for work-preserving restart. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47474fff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47474fff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47474fff

Branch: refs/heads/YARN-5734
Commit: 47474fffac085e0e5ea46336bf80ccd0677017a3
Parents: 2b5ad48
Author: Haibo Chen <ha...@apache.org>
Authored: Thu May 25 21:15:27 2017 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Thu May 25 21:15:27 2017 -0700

----------------------------------------------------------------------
 .../containermanager/ContainerManagerImpl.java  | 71 +++++++++++++-------
 .../application/ApplicationImpl.java            | 27 ++++++--
 .../yarn_server_nodemanager_recovery.proto      |  7 ++
 .../TestContainerManagerRecovery.java           | 40 +++++++++--
 4 files changed, 111 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
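For orientation, a minimal sketch of the round trip this patch introduces, mirroring the buildAppProto and recoverApplication hunks below; the helper names toProto/fromProto are hypothetical, not part of the patch:

    // Hedged sketch: persist FlowContext into the NM state store proto and
    // recover it on NM restart. FlowContextProto is added by this patch.
    static FlowContextProto toProto(FlowContext fc) {
      return FlowContextProto.newBuilder()
          .setFlowName(fc.getFlowName())
          .setFlowVersion(fc.getFlowVersion())
          .setFlowRunId(fc.getFlowRunId())
          .build();
    }

    static FlowContext fromProto(ContainerManagerApplicationProto p) {
      // hasFlowContext() is protobuf's presence check for an optional
      // message field; the getter itself never returns null.
      if (!p.hasFlowContext()) {
        return null;
      }
      FlowContextProto fcp = p.getFlowContext();
      return new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
          fcp.getFlowRunId());
    }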


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47474fff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f65f1ac..50268b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -381,10 +382,20 @@ public class ContainerManagerImpl extends CompositeService implements
           new LogAggregationContextPBImpl(p.getLogAggregationContext());
     }
 
+    FlowContext fc = null;
+    if (p.getFlowContext() != null) {
+      FlowContextProto fcp = p.getFlowContext();
+      fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
+          fcp.getFlowRunId());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Recovering Flow context: " + fc + " for an application " + appId);
+      }
+    }
+
     LOG.info("Recovering application " + appId);
-    //TODO: Recover flow and flow run ID
-    ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
-        creds, context, p.getAppLogAggregationInitedTime());
+    ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc,
+        appId, creds, context, p.getAppLogAggregationInitedTime());
     context.getApplications().put(appId, app);
     app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
   }
@@ -936,7 +947,7 @@ public class ContainerManagerImpl extends CompositeService implements
   private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
       String user, Credentials credentials,
       Map<ApplicationAccessType, String> appAcls,
-      LogAggregationContext logAggregationContext) {
+      LogAggregationContext logAggregationContext, FlowContext flowContext) {
 
     ContainerManagerApplicationProto.Builder builder =
         ContainerManagerApplicationProto.newBuilder();
@@ -971,6 +982,16 @@ public class ContainerManagerImpl extends CompositeService implements
       }
     }
 
+    builder.clearFlowContext();
+    if (flowContext != null && flowContext.getFlowName() != null
+        && flowContext.getFlowVersion() != null) {
+      FlowContextProto fcp =
+          FlowContextProto.newBuilder().setFlowName(flowContext.getFlowName())
+              .setFlowVersion(flowContext.getFlowVersion())
+              .setFlowRunId(flowContext.getFlowRunId()).build();
+      builder.setFlowContext(fcp);
+    }
+
     return builder.build();
   }
 
@@ -1016,25 +1037,29 @@ public class ContainerManagerImpl extends CompositeService implements
     this.readLock.lock();
     try {
       if (!isServiceStopped()) {
-        // Create the application
-        // populate the flow context from the launch context if the timeline
-        // service v.2 is enabled
-        FlowContext flowContext = null;
-        if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
-          String flowName = launchContext.getEnvironment().get(
-              TimelineUtils.FLOW_NAME_TAG_PREFIX);
-          String flowVersion = launchContext.getEnvironment().get(
-              TimelineUtils.FLOW_VERSION_TAG_PREFIX);
-          String flowRunIdStr = launchContext.getEnvironment().get(
-              TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
-          long flowRunId = 0L;
-          if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
-            flowRunId = Long.parseLong(flowRunIdStr);
-          }
-          flowContext =
-              new FlowContext(flowName, flowVersion, flowRunId);
-        }
         if (!context.getApplications().containsKey(applicationID)) {
+          // Create the application
+          // populate the flow context from the launch context if the timeline
+          // service v.2 is enabled
+          FlowContext flowContext = null;
+          if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
+            String flowName = launchContext.getEnvironment()
+                .get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
+            String flowVersion = launchContext.getEnvironment()
+                .get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
+            String flowRunIdStr = launchContext.getEnvironment()
+                .get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
+            long flowRunId = 0L;
+            if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
+              flowRunId = Long.parseLong(flowRunIdStr);
+            }
+            flowContext = new FlowContext(flowName, flowVersion, flowRunId);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Flow context: " + flowContext
+                  + " created for an application " + applicationID);
+            }
+          }
+
           Application application =
               new ApplicationImpl(dispatcher, user, flowContext,
                   applicationID, credentials, context);
@@ -1048,7 +1073,7 @@ public class ContainerManagerImpl extends CompositeService implements
                 container.getLaunchContext().getApplicationACLs();
             context.getNMStateStore().storeApplication(applicationID,
                 buildAppProto(applicationID, user, credentials, appAcls,
-                    logAggregationContext));
+                    logAggregationContext, flowContext));
             dispatcher.getEventHandler().handle(new ApplicationInitEvent(
                 applicationID, appAcls, logAggregationContext));
           }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47474fff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index 444581c..80863a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;
@@ -106,13 +107,6 @@ public class ApplicationImpl implements Application {
   }
 
   public ApplicationImpl(Dispatcher dispatcher, String user,
-      ApplicationId appId, Credentials credentials, Context context,
-      long recoveredLogInitedTime) {
-    this(dispatcher, user, null, appId, credentials, context,
-      recoveredLogInitedTime);
-  }
-
-  public ApplicationImpl(Dispatcher dispatcher, String user,
       FlowContext flowContext, ApplicationId appId, Credentials credentials,
       Context context, long recoveredLogInitedTime) {
     this.dispatcher = dispatcher;
@@ -171,6 +165,15 @@ public class ApplicationImpl implements Application {
     public long getFlowRunId() {
       return flowRunId;
     }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("{");
+      sb.append("Flow Name=").append(getFlowName());
+      sb.append(" Flow Versioin=").append(getFlowVersion());
+      sb.append(" Flow Run Id=").append(getFlowRunId()).append(" }");
+      return sb.toString();
+    }
   }
 
   @Override
@@ -390,6 +393,16 @@ public class ApplicationImpl implements Application {
 
     builder.setAppLogAggregationInitedTime(app.applicationLogInitedTimestamp);
 
+    builder.clearFlowContext();
+    if (app.flowContext != null && app.flowContext.getFlowName() != null
+        && app.flowContext.getFlowVersion() != null) {
+      FlowContextProto fcp = FlowContextProto.newBuilder()
+          .setFlowName(app.flowContext.getFlowName())
+          .setFlowVersion(app.flowContext.getFlowVersion())
+          .setFlowRunId(app.flowContext.getFlowRunId()).build();
+      builder.setFlowContext(fcp);
+    }
+
     return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47474fff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
index 0dfa20e..7831711 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
@@ -31,6 +31,7 @@ message ContainerManagerApplicationProto {
   repeated ApplicationACLMapProto acls = 4;
   optional LogAggregationContextProto log_aggregation_context = 5;
   optional int64 appLogAggregationInitedTime = 6 [ default = -1 ];
+  optional FlowContextProto flowContext = 7;
 }
 
 message DeletionServiceDeleteTaskProto {
@@ -52,3 +53,9 @@ message LogDeleterProto {
   optional string user = 1;
   optional int64 deletionTime = 2;
 }
+
+message FlowContextProto {
+  optional string flowName = 1;
+  optional string flowVersion = 2;
+  optional int64 flowRunId = 3;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47474fff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index 633bb6d..075d857 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -107,6 +107,7 @@ import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecret
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
 import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -136,6 +137,11 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
     conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
     conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath());
     conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+
+    // enable atsv2 by default in test
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+
     // Default delSrvc
     delSrvc = createDeletionService();
     delSrvc.init(conf);
@@ -144,6 +150,7 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
     nodeHealthChecker = new NodeHealthCheckerService(
         NodeManager.getNodeHealthScriptRunner(conf), dirsHandler);
     nodeHealthChecker.init(conf);
+
   }
 
   @Test
@@ -161,6 +168,7 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
     cm.start();
 
     // add an application by starting a container
+    String appName = "app_name1";
     String appUser = "app_user1";
     String modUser = "modify_user1";
     String viewUser = "view_user1";
@@ -170,7 +178,8 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
         ApplicationAttemptId.newInstance(appId, 1);
     ContainerId cid = ContainerId.newContainerId(attemptId, 1);
     Map<String, LocalResource> localResources = Collections.emptyMap();
-    Map<String, String> containerEnv = Collections.emptyMap();
+    Map<String, String> containerEnv = new HashMap<>();
+    setFlowContext(containerEnv, appName, appId);
     List<String> containerCmds = Collections.emptyList();
     Map<String, ByteBuffer> serviceData = Collections.emptyMap();
     Credentials containerCreds = new Credentials();
@@ -318,7 +327,8 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
         ApplicationAttemptId.newInstance(appId, 1);
     ContainerId cid = ContainerId.newContainerId(attemptId, 1);
     Map<String, LocalResource> localResources = Collections.emptyMap();
-    Map<String, String> containerEnv = Collections.emptyMap();
+    Map<String, String> containerEnv = new HashMap<>();
+    setFlowContext(containerEnv, "app_name1", appId);
     List<String> containerCmds = Collections.emptyList();
     Map<String, ByteBuffer> serviceData = Collections.emptyMap();
 
@@ -400,7 +410,8 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
     ApplicationAttemptId attemptId =
         ApplicationAttemptId.newInstance(appId, 1);
     ContainerId cid = ContainerId.newContainerId(attemptId, 1);
-    Map<String, String> containerEnv = Collections.emptyMap();
+    Map<String, String> containerEnv = new HashMap<>();
+    setFlowContext(containerEnv, "app_name1", appId);
     Map<String, ByteBuffer> serviceData = Collections.emptyMap();
     Credentials containerCreds = new Credentials();
     DataOutputBuffer dob = new DataOutputBuffer();
@@ -476,7 +487,8 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
         ApplicationAttemptId.newInstance(appId, 1);
     ContainerId cid = ContainerId.newContainerId(attemptId, 1);
     Map<String, LocalResource> localResources = Collections.emptyMap();
-    Map<String, String> containerEnv = Collections.emptyMap();
+    Map<String, String> containerEnv = new HashMap<>();
+    setFlowContext(containerEnv, "app_name1", appId);
     List<String> containerCmds = Collections.emptyList();
     Map<String, ByteBuffer> serviceData = Collections.emptyMap();
     Credentials containerCreds = new Credentials();
@@ -760,4 +772,24 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
     containerManager.dispatcher.disableExitOnDispatchException();
     return containerManager;
   }
+
+  private void setFlowContext(Map<String, String> containerEnv, String appName,
+      ApplicationId appId) {
+    if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
+      setFlowTags(containerEnv, TimelineUtils.FLOW_NAME_TAG_PREFIX,
+          TimelineUtils.generateDefaultFlowName(appName, appId));
+      setFlowTags(containerEnv, TimelineUtils.FLOW_VERSION_TAG_PREFIX,
+          TimelineUtils.DEFAULT_FLOW_VERSION);
+      setFlowTags(containerEnv, TimelineUtils.FLOW_RUN_ID_TAG_PREFIX,
+          String.valueOf(System.currentTimeMillis()));
+    }
+  }
+
+  private static void setFlowTags(Map<String, String> environment,
+      String tagPrefix, String value) {
+    if (!value.isEmpty()) {
+      environment.put(tagPrefix, value);
+    }
+  }
+
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[32/50] [abbrv] hadoop git commit: HDFS-11904. Reuse iip in unprotectedRemoveXAttrs calls.

Posted by xg...@apache.org.
HDFS-11904. Reuse iip in unprotectedRemoveXAttrs calls.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/219f4c19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/219f4c19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/219f4c19

Branch: refs/heads/YARN-5734
Commit: 219f4c199e45f8ce7f41192493bf0dc8f1e5dc30
Parents: 6a3fc68
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Jun 1 14:13:57 2017 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Thu Jun 1 14:13:57 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java       | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java    | 5 ++---
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java | 3 ++-
 3 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
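The change threads an already-resolved INodesInPath (iip) through the xattr-removal path instead of re-resolving the source string on every call; roughly, from the caller's side (a sketch condensed from the hunks below):

    // Before: unprotectedRemoveXAttrs(fsd, src, xattrs) resolved the path
    // internally on each invocation.
    // After: the caller resolves once and passes the result along.
    INodesInPath iip = fsd.getINodesInPath(src, DirOp.WRITE);  // single resolution
    FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, iip, xattrs);    // reuses it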


http://git-wip-us.apache.org/repos/asf/hadoop/blob/219f4c19/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index a875e4b..bedbe7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -242,7 +242,7 @@ final class FSDirErasureCodingOp {
 
     final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
     xattrs.add(ecXAttr);
-    FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP.getPath(), xattrs);
+    FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs);
     return xattrs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/219f4c19/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index e5243ee..ddc088c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -170,7 +170,7 @@ class FSDirXAttrOp {
       src = iip.getPath();
       checkXAttrChangeAccess(fsd, iip, xAttr, pc);
 
-      List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, src, xAttrs);
+      List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, iip, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
         fsd.getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
       } else {
@@ -184,10 +184,9 @@ class FSDirXAttrOp {
   }
 
   static List<XAttr> unprotectedRemoveXAttrs(
-      FSDirectory fsd, final String src, final List<XAttr> toRemove)
+      FSDirectory fsd, final INodesInPath iip, final List<XAttr> toRemove)
       throws IOException {
     assert fsd.hasWriteLock();
-    INodesInPath iip = fsd.getINodesInPath(src, DirOp.WRITE);
     INode inode = FSDirectory.resolveLastINode(iip);
     int snapshotId = iip.getLatestSnapshotId();
     List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/219f4c19/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index ae0b304..060bd59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -931,7 +931,8 @@ public class FSEditLogLoader {
     }
     case OP_REMOVE_XATTR: {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
-      FSDirXAttrOp.unprotectedRemoveXAttrs(fsDir, removeXAttrOp.src,
+      INodesInPath iip = fsDir.getINodesInPath(removeXAttrOp.src, DirOp.WRITE);
+      FSDirXAttrOp.unprotectedRemoveXAttrs(fsDir, iip,
                                            removeXAttrOp.xAttrs);
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,




[03/50] [abbrv] hadoop git commit: YARN-6646. Modifier 'static' is redundant for inner enums (Contributed by ZhangBing Lin via Daniel Templeton)

Posted by xg...@apache.org.
YARN-6646. Modifier 'static' is redundant for inner enums
(Contributed by ZhangBing Lin via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d81372df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d81372df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d81372df

Branch: refs/heads/YARN-5734
Commit: d81372dfad32488e7c46ffcfccdf0aa26bee04a5
Parents: aea4293
Author: Daniel Templeton <te...@apache.org>
Authored: Fri May 26 12:05:48 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Fri May 26 12:05:48 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/timelineservice/TimelineMetric.java  | 2 +-
 .../yarn/applications/distributedshell/ApplicationMaster.java    | 4 ++--
 .../hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java     | 4 ++--
 .../ApplicationHistoryManagerOnTimelineStore.java                | 2 +-
 .../hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java   | 2 +-
 .../yarn/server/nodemanager/CMgrCompletedContainersEvent.java    | 2 +-
 .../org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java | 2 +-
 .../apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java | 2 +-
 .../resourcemanager/metrics/AbstractSystemMetricsPublisher.java  | 2 +-
 .../server/timelineservice/reader/filter/TimelineFilterList.java | 2 +-
 10 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
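For reference, a nested enum type is implicitly static in Java (JLS 8.9), so the modifier these hunks drop changes nothing; a small illustration:

    public class Outer {
      // The two declarations below are identical to the compiler:
      public static enum A { X, Y }  // 'static' is redundant here
      public enum B { X, Y }         // implicitly static
    }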


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
index 5c908d6..2fa6d30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
@@ -43,7 +43,7 @@ public class TimelineMetric {
   /**
    * Type of metric.
    */
-  public static enum Type {
+  public enum Type {
     SINGLE_VALUE,
     TIME_SERIES
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 4daebb5..ab4607a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -179,13 +179,13 @@ public class ApplicationMaster {
 
   @VisibleForTesting
   @Private
-  public static enum DSEvent {
+  public enum DSEvent {
     DS_APP_ATTEMPT_START, DS_APP_ATTEMPT_END, DS_CONTAINER_START, DS_CONTAINER_END
   }
   
   @VisibleForTesting
   @Private
-  public static enum DSEntity {
+  public enum DSEntity {
     DS_APP_ATTEMPT, DS_CONTAINER
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
index 515a8e8..20be71e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
@@ -406,7 +406,7 @@ public class NMClientAsyncImpl extends NMClientAsync {
     }
   }
 
-  protected static enum ContainerState {
+  protected enum ContainerState {
     PREP, FAILED, RUNNING, DONE,
   }
 
@@ -423,7 +423,7 @@ public class NMClientAsyncImpl extends NMClientAsync {
   /**
    * The type of the event of interacting with a container
    */
-  protected static enum ContainerEventType {
+  protected enum ContainerEventType {
     START_CONTAINER,
     STOP_CONTAINER,
     QUERY_CONTAINER,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index d18f3dc..5404338 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -740,7 +740,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
      }
    }
 
-  private static enum ApplicationReportField {
+  private enum ApplicationReportField {
     ALL, // retrieve all the fields
     USER_AND_ACLS // retrieve user and ACLs info only
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java
index de1b7f4..93c350f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java
@@ -43,7 +43,7 @@ public class CMgrCompletedAppsEvent extends ContainerManagerEvent {
     return reason;
   }
 
-  public static enum Reason {
+  public enum Reason {
     /**
      * Application is killed as NodeManager is shut down
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java
index 5f7d01e..40aa1dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedContainersEvent.java
@@ -42,7 +42,7 @@ public class CMgrCompletedContainersEvent extends ContainerManagerEvent {
     return reason;
   }
 
-  public static enum Reason {
+  public enum Reason {
     /**
      * Container is killed as NodeManager is shutting down
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
index 677703e..279e299 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 public class NMAuditLogger {
   private static final Log LOG = LogFactory.getLog(NMAuditLogger.class);
 
-  static enum Keys {USER, OPERATION, TARGET, RESULT, IP, 
+  enum Keys {USER, OPERATION, TARGET, RESULT, IP,
                     DESCRIPTION, APPID, CONTAINERID}
 
   public static class AuditConstants {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
index fc0b265..29ce636 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 public class RMAuditLogger {
   private static final Log LOG = LogFactory.getLog(RMAuditLogger.class);
 
-  static enum Keys {USER, OPERATION, TARGET, RESULT, IP, PERMISSIONS,
+  enum Keys {USER, OPERATION, TARGET, RESULT, IP, PERMISSIONS,
                     DESCRIPTION, APPID, APPATTEMPTID, CONTAINERID, 
                     CALLERCONTEXT, CALLERSIGNATURE, RESOURCE}
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
index e56fccf..96b153f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/AbstractSystemMetricsPublisher.java
@@ -118,7 +118,7 @@ public abstract class AbstractSystemMetricsPublisher extends CompositeService
   /**
    * EventType which is used while publishing the events.
    */
-  protected static enum SystemMetricsEventType {
+  protected enum SystemMetricsEventType {
     PUBLISH_ENTITY, PUBLISH_APPLICATION_FINISHED_ENTITY
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterList.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterList.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterList.java
index b4c7ad2..7d84232 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterList.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterList.java
@@ -41,7 +41,7 @@ public class TimelineFilterList extends TimelineFilter {
    */
   @Private
   @Unstable
-  public static enum Operator {
+  public enum Operator {
     AND,
     OR
   }




[31/50] [abbrv] hadoop git commit: HDFS-11741. Long running balancer may fail due to expired DataEncryptionKey. Contributed by Wei-Chiu Chuang and Xiao Chen.

Posted by xg...@apache.org.
HDFS-11741. Long running balancer may fail due to expired DataEncryptionKey. Contributed by Wei-Chiu Chuang and Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a3fc685
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a3fc685
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a3fc685

Branch: refs/heads/YARN-5734
Commit: 6a3fc685a98718742c351ed6625dc7a4dee55e77
Parents: ece3320
Author: Xiao Chen <xi...@apache.org>
Authored: Wed May 31 16:50:33 2017 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Thu Jun 1 14:05:37 2017 -0700

----------------------------------------------------------------------
 .../token/block/BlockTokenSecretManager.java    | 23 ++++--
 .../hadoop/hdfs/server/balancer/KeyManager.java | 33 +++++++-
 .../hdfs/server/balancer/TestKeyManager.java    | 87 ++++++++++++++++++++
 3 files changed, 131 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
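The heart of the fix, condensed from the KeyManager hunk below: re-check the cached key's expiry against an injectable Timer before handing it out (a sketch using the names from the diff):

    synchronized (this) {
      // Regenerate the data encryption key (EK) if it was never created or
      // has expired; otherwise keep reusing the cached one. The injectable
      // timer lets tests advance the clock with FakeTimer.
      if (encryptionKey == null || encryptionKey.expiryDate < timer.now()) {
        encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();
      }
      return encryptionKey;
    }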


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3fc685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 6b54490..8be22d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.util.Time;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.Timer;
 
 /**
  * BlockTokenSecretManager can be instantiated in 2 modes, master mode
@@ -84,6 +85,11 @@ public class BlockTokenSecretManager extends
   private final SecureRandom nonceGenerator = new SecureRandom();
 
   /**
+   * Timer object for querying the current time. Separated out for
+   * unit testing.
+   */
+  private Timer timer;
+  /**
    * Constructor for workers.
    *
    * @param keyUpdateInterval how often a new key will be generated
@@ -130,6 +136,7 @@ public class BlockTokenSecretManager extends
     this.blockPoolId = blockPoolId;
     this.encryptionAlgorithm = encryptionAlgorithm;
     this.useProto = useProto;
+    this.timer = new Timer();
     generateKeys();
   }
 
@@ -160,10 +167,10 @@ public class BlockTokenSecretManager extends
      * more.
      */
     setSerialNo(serialNo + 1);
-    currentKey = new BlockKey(serialNo, Time.now() + 2
+    currentKey = new BlockKey(serialNo, timer.now() + 2
         * keyUpdateInterval + tokenLifetime, generateSecret());
     setSerialNo(serialNo + 1);
-    nextKey = new BlockKey(serialNo, Time.now() + 3
+    nextKey = new BlockKey(serialNo, timer.now() + 3
         * keyUpdateInterval + tokenLifetime, generateSecret());
     allKeys.put(currentKey.getKeyId(), currentKey);
     allKeys.put(nextKey.getKeyId(), nextKey);
@@ -180,7 +187,7 @@ public class BlockTokenSecretManager extends
   }
 
   private synchronized void removeExpiredKeys() {
-    long now = Time.now();
+    long now = timer.now();
     for (Iterator<Map.Entry<Integer, BlockKey>> it = allKeys.entrySet()
         .iterator(); it.hasNext();) {
       Map.Entry<Integer, BlockKey> e = it.next();
@@ -230,15 +237,15 @@ public class BlockTokenSecretManager extends
     removeExpiredKeys();
     // set final expiry date of retiring currentKey
     allKeys.put(currentKey.getKeyId(), new BlockKey(currentKey.getKeyId(),
-        Time.now() + keyUpdateInterval + tokenLifetime,
+        timer.now() + keyUpdateInterval + tokenLifetime,
         currentKey.getKey()));
     // update the estimated expiry date of new currentKey
-    currentKey = new BlockKey(nextKey.getKeyId(), Time.now()
+    currentKey = new BlockKey(nextKey.getKeyId(), timer.now()
         + 2 * keyUpdateInterval + tokenLifetime, nextKey.getKey());
     allKeys.put(currentKey.getKeyId(), currentKey);
     // generate a new nextKey
     setSerialNo(serialNo + 1);
-    nextKey = new BlockKey(serialNo, Time.now() + 3
+    nextKey = new BlockKey(serialNo, timer.now() + 3
         * keyUpdateInterval + tokenLifetime, generateSecret());
     allKeys.put(nextKey.getKeyId(), nextKey);
     return true;
@@ -410,7 +417,7 @@ public class BlockTokenSecretManager extends
     }
     if (key == null)
       throw new IllegalStateException("currentKey hasn't been initialized.");
-    identifier.setExpiryDate(Time.now() + tokenLifetime);
+    identifier.setExpiryDate(timer.now() + tokenLifetime);
     identifier.setKeyId(key.getKeyId());
     if (LOG.isDebugEnabled()) {
       LOG.debug("Generating block token for " + identifier.toString());
@@ -461,7 +468,7 @@ public class BlockTokenSecretManager extends
     }
     byte[] encryptionKey = createPassword(nonce, key.getKey());
     return new DataEncryptionKey(key.getKeyId(), blockPoolId, nonce,
-        encryptionKey, Time.now() + tokenLifetime,
+        encryptionKey, timer.now() + tokenLifetime,
         encryptionAlgorithm);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3fc685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
index faf95b7..5644ef7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
@@ -21,8 +21,6 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
@@ -37,13 +35,16 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The class provides utilities for key and token management.
  */
 @InterfaceAudience.Private
 public class KeyManager implements Closeable, DataEncryptionKeyFactory {
-  private static final Log LOG = LogFactory.getLog(KeyManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(KeyManager.class);
 
   private final NamenodeProtocol namenode;
 
@@ -54,11 +55,17 @@ public class KeyManager implements Closeable, DataEncryptionKeyFactory {
   private final BlockTokenSecretManager blockTokenSecretManager;
   private final BlockKeyUpdater blockKeyUpdater;
   private DataEncryptionKey encryptionKey;
+  /**
+   * Timer object for querying the current time. Separated out for
+   * unit testing.
+   */
+  private Timer timer;
 
   public KeyManager(String blockpoolID, NamenodeProtocol namenode,
       boolean encryptDataTransfer, Configuration conf) throws IOException {
     this.namenode = namenode;
     this.encryptDataTransfer = encryptDataTransfer;
+    this.timer = new Timer();
 
     final ExportedBlockKeys keys = namenode.getBlockKeys();
     this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
@@ -113,7 +120,25 @@ public class KeyManager implements Closeable, DataEncryptionKeyFactory {
   public DataEncryptionKey newDataEncryptionKey() {
     if (encryptDataTransfer) {
       synchronized (this) {
-        if (encryptionKey == null) {
+        if (encryptionKey == null ||
+            encryptionKey.expiryDate < timer.now()) {
+          // Encryption Key (EK) is generated from Block Key (BK).
+          // Check if EK is expired, and generate a new one using the current BK
+          // if so, otherwise continue to use the previously generated EK.
+          //
+          // It's important to make sure that when EK is not expired, the BK
+          // used to generate the EK is not expired and removed, because
+          // the same BK will be used to re-generate the EK
+          // by BlockTokenSecretManager.
+          //
+          // The current implementation ensures that when an EK is not expired
+          // (within tokenLifetime), the BK that's used to generate it
+          // still has at least "keyUpdateInterval" of life time before
+          // the BK gets expired and removed.
+          // See BlockTokenSecretManager for details.
+          LOG.debug("Generating new data encryption key because current key "
+              + (encryptionKey == null ?
+              "is null." : "expired on " + encryptionKey.expiryDate));
           encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();
         }
         return encryptionKey;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a3fc685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
new file mode 100644
index 0000000..58cffb4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.util.FakeTimer;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.mockito.internal.util.reflection.Whitebox;
+
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test KeyManager class.
+ */
+public class TestKeyManager {
+  @Rule
+  public Timeout globalTimeout = new Timeout(120000);
+
+  @Test
+  public void testNewDataEncryptionKey() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    // Enable data transport encryption and access token
+    conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+
+    final long keyUpdateInterval = 2 * 1000;
+    final long tokenLifeTime = keyUpdateInterval;
+    final String blockPoolId = "bp-foo";
+    FakeTimer fakeTimer = new FakeTimer();
+    BlockTokenSecretManager btsm = new BlockTokenSecretManager(
+        keyUpdateInterval, tokenLifeTime, 0, 1, blockPoolId, null, false);
+    Whitebox.setInternalState(btsm, "timer", fakeTimer);
+
+    // When KeyManager asks for block keys, return them from btsm directly
+    NamenodeProtocol namenode = mock(NamenodeProtocol.class);
+    when(namenode.getBlockKeys()).thenReturn(btsm.exportKeys());
+
+    // Instantiate a KeyManager instance and get data encryption key.
+    KeyManager keyManager = new KeyManager(blockPoolId, namenode,
+        true, conf);
+    Whitebox.setInternalState(keyManager, "timer", fakeTimer);
+    Whitebox.setInternalState(
+        Whitebox.getInternalState(keyManager, "blockTokenSecretManager"),
+        "timer", fakeTimer);
+    final DataEncryptionKey dek = keyManager.newDataEncryptionKey();
+    final long remainingTime = dek.expiryDate - fakeTimer.now();
+    assertEquals("KeyManager dataEncryptionKey should expire in 2 seconds",
+        keyUpdateInterval, remainingTime);
+    // advance the timer to expire the block key and data encryption key
+    fakeTimer.advance(keyUpdateInterval + 1);
+
+    // After the initial data encryption key expires, KeyManager should
+    // regenerate a valid data encryption key using the current block key.
+    final DataEncryptionKey dekAfterExpiration =
+        keyManager.newDataEncryptionKey();
+    assertNotEquals("KeyManager should generate a new data encryption key",
+        dek, dekAfterExpiration);
+    assertTrue("KeyManager has an expired DataEncryptionKey!",
+        dekAfterExpiration.expiryDate > fakeTimer.now());
+  }
+}
\ No newline at end of file




[19/50] [abbrv] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

Posted by xg...@apache.org.
YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/547f18cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/547f18cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/547f18cb

Branch: refs/heads/YARN-5734
Commit: 547f18cb96aeda55cc19b38be2be4d631b3a5f4f
Parents: 4b4a652
Author: Varun Vasudev <vv...@apache.org>
Authored: Wed May 31 16:15:35 2017 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Wed May 31 16:15:35 2017 +0530

----------------------------------------------------------------------
 .../server/nodemanager/DeletionService.java     | 468 ++++---------------
 .../nodemanager/api/impl/pb/NMProtoUtils.java   | 110 +++++
 .../nodemanager/api/impl/pb/package-info.java   |  25 +
 .../recovery/DeletionTaskRecoveryInfo.java      |  73 +++
 .../deletion/recovery/package-info.java         |  25 +
 .../deletion/task/DeletionTask.java             | 258 ++++++++++
 .../deletion/task/DeletionTaskType.java         |  24 +
 .../deletion/task/FileDeletionTask.java         | 202 ++++++++
 .../deletion/task/package-info.java             |  25 +
 .../localizer/LocalResourcesTrackerImpl.java    |  13 +-
 .../localizer/ResourceLocalizationService.java  |  40 +-
 .../logaggregation/AppLogAggregatorImpl.java    |  60 ++-
 .../loghandler/NonAggregatingLogHandler.java    |   7 +-
 .../yarn_server_nodemanager_recovery.proto      |   1 +
 .../server/nodemanager/TestDeletionService.java |  57 ++-
 .../nodemanager/TestNodeManagerReboot.java      |  99 +---
 .../api/impl/pb/TestNMProtoUtils.java           |  91 ++++
 .../BaseContainerManagerTest.java               |   7 +-
 .../deletion/task/FileDeletionMatcher.java      |  84 ++++
 .../deletion/task/TestFileDeletionTask.java     |  85 ++++
 .../TestLocalResourcesTrackerImpl.java          |   5 +-
 .../TestResourceLocalizationService.java        |  33 +-
 .../TestAppLogAggregatorImpl.java               |  15 +-
 .../TestLogAggregationService.java              |  17 +-
 .../TestNonAggregatingLogHandler.java           |   8 +-
 25 files changed, 1274 insertions(+), 558 deletions(-)
----------------------------------------------------------------------
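
For reviewers tracking call sites: the patch replaces the variadic
DeletionService#delete(String, Path, Path...) with a single
delete(DeletionTask) entry point, with FileDeletionTask carrying what used to
be the method arguments. A minimal migration sketch (CleanupSketch and its
parameters are hypothetical; the constructor and delete() signature are the
ones introduced below):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

public class CleanupSketch {
  void cleanup(DeletionService delService, String user, Path subDir,
      Path... baseDirs) {
    // Before this patch: delService.delete(user, subDir, baseDirs);
    List<Path> baseDirList = (baseDirs == null || baseDirs.length == 0)
        ? null : Arrays.asList(baseDirs);
    delService.delete(
        new FileDeletionTask(delService, user, subDir, baseDirList));
  }
}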


http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index aac0af9..38d69a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -21,11 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -38,461 +35,176 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
-import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.NMProtoUtils;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
-import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 public class DeletionService extends AbstractService {
-  static final Log LOG = LogFactory.getLog(DeletionService.class);
+
+  private static final Log LOG = LogFactory.getLog(DeletionService.class);
+
   private int debugDelay;
-  private final ContainerExecutor exec;
-  private ScheduledThreadPoolExecutor sched;
-  private static final FileContext lfs = getLfs();
+  private final ContainerExecutor containerExecutor;
   private final NMStateStoreService stateStore;
+  private ScheduledThreadPoolExecutor sched;
   private AtomicInteger nextTaskId = new AtomicInteger(0);
 
-  static final FileContext getLfs() {
-    try {
-      return FileContext.getLocalFSFileContext();
-    } catch (UnsupportedFileSystemException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   public DeletionService(ContainerExecutor exec) {
     this(exec, new NMNullStateStoreService());
   }
 
-  public DeletionService(ContainerExecutor exec,
+  public DeletionService(ContainerExecutor containerExecutor,
       NMStateStoreService stateStore) {
     super(DeletionService.class.getName());
-    this.exec = exec;
+    this.containerExecutor = containerExecutor;
     this.debugDelay = 0;
     this.stateStore = stateStore;
   }
-  
-  /**
-   * Delete the path(s) as this user.
-   * @param user The user to delete as, or the JVM user if null
-   * @param subDir the sub directory name
-   * @param baseDirs the base directories which contains the subDir's
-   */
-  public void delete(String user, Path subDir, Path... baseDirs) {
-    // TODO if parent owned by NM, rename within parent inline
-    if (debugDelay != -1) {
-      List<Path> baseDirList = null;
-      if (baseDirs != null && baseDirs.length != 0) {
-        baseDirList = Arrays.asList(baseDirs);
-      }
-      FileDeletionTask task =
-          new FileDeletionTask(this, user, subDir, baseDirList);
-      recordDeletionTaskInStateStore(task);
-      sched.schedule(task, debugDelay, TimeUnit.SECONDS);
-    }
-  }
-  
-  public void scheduleFileDeletionTask(FileDeletionTask fileDeletionTask) {
-    if (debugDelay != -1) {
-      recordDeletionTaskInStateStore(fileDeletionTask);
-      sched.schedule(fileDeletionTask, debugDelay, TimeUnit.SECONDS);
-    }
-  }
-  
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    ThreadFactory tf = new ThreadFactoryBuilder()
-      .setNameFormat("DeletionService #%d")
-      .build();
-    if (conf != null) {
-      sched = new HadoopScheduledThreadPoolExecutor(
-          conf.getInt(YarnConfiguration.NM_DELETE_THREAD_COUNT,
-          YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT), tf);
-      debugDelay = conf.getInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0);
-    } else {
-      sched = new HadoopScheduledThreadPoolExecutor(
-          YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT, tf);
-    }
-    sched.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-    sched.setKeepAliveTime(60L, SECONDS);
-    if (stateStore.canRecover()) {
-      recover(stateStore.loadDeletionServiceState());
-    }
-    super.serviceInit(conf);
-  }
 
-  @Override
-  protected void serviceStop() throws Exception {
-    if (sched != null) {
-      sched.shutdown();
-      boolean terminated = false;
-      try {
-        terminated = sched.awaitTermination(10, SECONDS);
-      } catch (InterruptedException e) {
-      }
-      if (terminated != true) {
-        sched.shutdownNow();
-      }
-    }
-    super.serviceStop();
+  public int getDebugDelay() {
+    return debugDelay;
   }
 
-  /**
-   * Determine if the service has completely stopped.
-   * Used only by unit tests
-   * @return true if service has completely stopped
-   */
-  @Private
-  public boolean isTerminated() {
-    return getServiceState() == STATE.STOPPED && sched.isTerminated();
+  public ContainerExecutor getContainerExecutor() {
+    return containerExecutor;
   }
 
-  public static class FileDeletionTask implements Runnable {
-    public static final int INVALID_TASK_ID = -1;
-    private int taskId;
-    private final String user;
-    private final Path subDir;
-    private final List<Path> baseDirs;
-    private final AtomicInteger numberOfPendingPredecessorTasks;
-    private final Set<FileDeletionTask> successorTaskSet;
-    private final DeletionService delService;
-    // By default all tasks will start as success=true; however if any of
-    // the dependent task fails then it will be marked as false in
-    // fileDeletionTaskFinished().
-    private boolean success;
-    
-    private FileDeletionTask(DeletionService delService, String user,
-        Path subDir, List<Path> baseDirs) {
-      this(INVALID_TASK_ID, delService, user, subDir, baseDirs);
-    }
-
-    private FileDeletionTask(int taskId, DeletionService delService,
-        String user, Path subDir, List<Path> baseDirs) {
-      this.taskId = taskId;
-      this.delService = delService;
-      this.user = user;
-      this.subDir = subDir;
-      this.baseDirs = baseDirs;
-      this.successorTaskSet = new HashSet<FileDeletionTask>();
-      this.numberOfPendingPredecessorTasks = new AtomicInteger(0);
-      success = true;
-    }
-    
-    /**
-     * increments and returns pending predecessor task count
-     */
-    public int incrementAndGetPendingPredecessorTasks() {
-      return numberOfPendingPredecessorTasks.incrementAndGet();
-    }
-    
-    /**
-     * decrements and returns pending predecessor task count
-     */
-    public int decrementAndGetPendingPredecessorTasks() {
-      return numberOfPendingPredecessorTasks.decrementAndGet();
-    }
-    
-    @VisibleForTesting
-    public String getUser() {
-      return this.user;
-    }
-    
-    @VisibleForTesting
-    public Path getSubDir() {
-      return this.subDir;
-    }
-    
-    @VisibleForTesting
-    public List<Path> getBaseDirs() {
-      return this.baseDirs;
-    }
-    
-    public synchronized void setSuccess(boolean success) {
-      this.success = success;
-    }
-    
-    public synchronized boolean getSucess() {
-      return this.success;
-    }
-    
-    public synchronized FileDeletionTask[] getSuccessorTasks() {
-      FileDeletionTask[] successors =
-          new FileDeletionTask[successorTaskSet.size()];
-      return successorTaskSet.toArray(successors);
-    }
+  public NMStateStoreService getStateStore() {
+    return stateStore;
+  }
 
-    @Override
-    public void run() {
+  public void delete(DeletionTask deletionTask) {
+    if (debugDelay != -1) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(this);
-      }
-      boolean error = false;
-      if (null == user) {
-        if (baseDirs == null || baseDirs.size() == 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("NM deleting absolute path : " + subDir);
-          }
-          try {
-            lfs.delete(subDir, true);
-          } catch (IOException e) {
-            error = true;
-            LOG.warn("Failed to delete " + subDir);
-          }
-        } else {
-          for (Path baseDir : baseDirs) {
-            Path del = subDir == null? baseDir : new Path(baseDir, subDir);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("NM deleting path : " + del);
-            }
-            try {
-              lfs.delete(del, true);
-            } catch (IOException e) {
-              error = true;
-              LOG.warn("Failed to delete " + subDir);
-            }
-          }
-        }
-      } else {
-        try {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Deleting path: [" + subDir + "] as user: [" + user + "]");
-          }
-          if (baseDirs == null || baseDirs.size() == 0) {
-            delService.exec.deleteAsUser(new DeletionAsUserContext.Builder()
-                .setUser(user)
-                .setSubDir(subDir)
-                .build());
-          } else {
-            delService.exec.deleteAsUser(new DeletionAsUserContext.Builder()
-                .setUser(user)
-                .setSubDir(subDir)
-                .setBasedirs(baseDirs.toArray(new Path[0]))
-                .build());
-          }
-        } catch (IOException e) {
-          error = true;
-          LOG.warn("Failed to delete as user " + user, e);
-        } catch (InterruptedException e) {
-          error = true;
-          LOG.warn("Failed to delete as user " + user, e);
-        }
-      }
-      if (error) {
-        setSuccess(!error);        
-      }
-      fileDeletionTaskFinished();
-    }
-
-    @Override
-    public String toString() {
-      StringBuffer sb = new StringBuffer("\nFileDeletionTask : ");
-      sb.append("  user : ").append(this.user);
-      sb.append("  subDir : ").append(
-        subDir == null ? "null" : subDir.toString());
-      sb.append("  baseDir : ");
-      if (baseDirs == null || baseDirs.size() == 0) {
-        sb.append("null");
-      } else {
-        for (Path baseDir : baseDirs) {
-          sb.append(baseDir.toString()).append(',');
-        }
-      }
-      return sb.toString();
-    }
-    
-    /**
-     * If there is a task dependency between say tasks 1,2,3 such that
-     * task2 and task3 can be started only after task1 then we should define
-     * task2 and task3 as successor tasks for task1.
-     * Note:- Task dependency should be defined prior to
-     * @param successorTask
-     */
-    public synchronized void addFileDeletionTaskDependency(
-        FileDeletionTask successorTask) {
-      if (successorTaskSet.add(successorTask)) {
-        successorTask.incrementAndGetPendingPredecessorTasks();
+        String msg = String.format("Scheduling DeletionTask (delay %d) : %s",
+            debugDelay, deletionTask.toString());
+        LOG.debug(msg);
       }
+      recordDeletionTaskInStateStore(deletionTask);
+      sched.schedule(deletionTask, debugDelay, TimeUnit.SECONDS);
     }
-    
-    /*
-     * This is called when
-     * 1) Current file deletion task ran and finished.
-     * 2) This can be even directly called by predecessor task if one of the
-     * dependent tasks of it has failed marking its success = false.  
-     */
-    private synchronized void fileDeletionTaskFinished() {
-      try {
-        delService.stateStore.removeDeletionTask(taskId);
-      } catch (IOException e) {
-        LOG.error("Unable to remove deletion task " + taskId
-            + " from state store", e);
-      }
-      Iterator<FileDeletionTask> successorTaskI =
-          this.successorTaskSet.iterator();
-      while (successorTaskI.hasNext()) {
-        FileDeletionTask successorTask = successorTaskI.next();
-        if (!success) {
-          successorTask.setSuccess(success);
-        }
-        int count = successorTask.decrementAndGetPendingPredecessorTasks();
-        if (count == 0) {
-          if (successorTask.getSucess()) {
-            successorTask.delService.scheduleFileDeletionTask(successorTask);
-          } else {
-            successorTask.fileDeletionTaskFinished();
-          }
-        }
-      }
-    }
-  }
-  
-  /**
-   * Helper method to create file deletion task. To be used only if we need
-   * a way to define dependencies between deletion tasks.
-   * @param user user on whose behalf this task is suppose to run
-   * @param subDir sub directory as required in 
-   * {@link DeletionService#delete(String, Path, Path...)}
-   * @param baseDirs base directories as required in
-   * {@link DeletionService#delete(String, Path, Path...)}
-   */
-  public FileDeletionTask createFileDeletionTask(String user, Path subDir,
-      Path[] baseDirs) {
-    return new FileDeletionTask(this, user, subDir, Arrays.asList(baseDirs));
   }
 
-  private void recover(RecoveredDeletionServiceState state)
+  private void recover(NMStateStoreService.RecoveredDeletionServiceState state)
       throws IOException {
     List<DeletionServiceDeleteTaskProto> taskProtos = state.getTasks();
     Map<Integer, DeletionTaskRecoveryInfo> idToInfoMap =
-        new HashMap<Integer, DeletionTaskRecoveryInfo>(taskProtos.size());
-    Set<Integer> successorTasks = new HashSet<Integer>();
+        new HashMap<>(taskProtos.size());
+    Set<Integer> successorTasks = new HashSet<>();
     for (DeletionServiceDeleteTaskProto proto : taskProtos) {
-      DeletionTaskRecoveryInfo info = parseTaskProto(proto);
-      idToInfoMap.put(info.task.taskId, info);
-      nextTaskId.set(Math.max(nextTaskId.get(), info.task.taskId));
-      successorTasks.addAll(info.successorTaskIds);
+      DeletionTaskRecoveryInfo info =
+          NMProtoUtils.convertProtoToDeletionTaskRecoveryInfo(proto, this);
+      idToInfoMap.put(info.getTask().getTaskId(), info);
+      nextTaskId.set(Math.max(nextTaskId.get(), info.getTask().getTaskId()));
+      successorTasks.addAll(info.getSuccessorTaskIds());
     }
 
     // restore the task dependencies and schedule the deletion tasks that
     // have no predecessors
     final long now = System.currentTimeMillis();
     for (DeletionTaskRecoveryInfo info : idToInfoMap.values()) {
-      for (Integer successorId : info.successorTaskIds){
+      for (Integer successorId : info.getSuccessorTaskIds()){
         DeletionTaskRecoveryInfo successor = idToInfoMap.get(successorId);
         if (successor != null) {
-          info.task.addFileDeletionTaskDependency(successor.task);
+          info.getTask().addDeletionTaskDependency(successor.getTask());
         } else {
           LOG.error("Unable to locate dependency task for deletion task "
-              + info.task.taskId + " at " + info.task.getSubDir());
+              + info.getTask().getTaskId());
         }
       }
-      if (!successorTasks.contains(info.task.taskId)) {
-        long msecTilDeletion = info.deletionTimestamp - now;
-        sched.schedule(info.task, msecTilDeletion, TimeUnit.MILLISECONDS);
+      if (!successorTasks.contains(info.getTask().getTaskId())) {
+        long msecTilDeletion = info.getDeletionTimestamp() - now;
+        sched.schedule(info.getTask(), msecTilDeletion, TimeUnit.MILLISECONDS);
       }
     }
   }
 
-  private DeletionTaskRecoveryInfo parseTaskProto(
-      DeletionServiceDeleteTaskProto proto) throws IOException {
-    int taskId = proto.getId();
-    String user = proto.hasUser() ? proto.getUser() : null;
-    Path subdir = null;
-    List<Path> basePaths = null;
-    if (proto.hasSubdir()) {
-      subdir = new Path(proto.getSubdir());
-    }
-    List<String> basedirs = proto.getBasedirsList();
-    if (basedirs != null && basedirs.size() > 0) {
-      basePaths = new ArrayList<Path>(basedirs.size());
-      for (String basedir : basedirs) {
-        basePaths.add(new Path(basedir));
-      }
-    }
-
-    FileDeletionTask task = new FileDeletionTask(taskId, this, user,
-        subdir, basePaths);
-    return new DeletionTaskRecoveryInfo(task,
-        proto.getSuccessorIdsList(),
-        proto.getDeletionTime());
-  }
-
   private int generateTaskId() {
     // get the next ID but avoid an invalid ID
     int taskId = nextTaskId.incrementAndGet();
-    while (taskId == FileDeletionTask.INVALID_TASK_ID) {
+    while (taskId == DeletionTask.INVALID_TASK_ID) {
       taskId = nextTaskId.incrementAndGet();
     }
     return taskId;
   }
 
-  private void recordDeletionTaskInStateStore(FileDeletionTask task) {
+  private void recordDeletionTaskInStateStore(DeletionTask task) {
     if (!stateStore.canRecover()) {
       // optimize the case where we aren't really recording
       return;
     }
-    if (task.taskId != FileDeletionTask.INVALID_TASK_ID) {
+    if (task.getTaskId() != DeletionTask.INVALID_TASK_ID) {
       return;  // task already recorded
     }
 
-    task.taskId = generateTaskId();
-
-    FileDeletionTask[] successors = task.getSuccessorTasks();
+    task.setTaskId(generateTaskId());
 
     // store successors first to ensure task IDs have been generated for them
-    for (FileDeletionTask successor : successors) {
+    DeletionTask[] successors = task.getSuccessorTasks();
+    for (DeletionTask successor : successors) {
       recordDeletionTaskInStateStore(successor);
     }
 
-    DeletionServiceDeleteTaskProto.Builder builder =
-        DeletionServiceDeleteTaskProto.newBuilder();
-    builder.setId(task.taskId);
-    if (task.getUser() != null) {
-      builder.setUser(task.getUser());
-    }
-    if (task.getSubDir() != null) {
-      builder.setSubdir(task.getSubDir().toString());
-    }
-    builder.setDeletionTime(System.currentTimeMillis() +
-        TimeUnit.MILLISECONDS.convert(debugDelay, TimeUnit.SECONDS));
-    if (task.getBaseDirs() != null) {
-      for (Path dir : task.getBaseDirs()) {
-        builder.addBasedirs(dir.toString());
-      }
-    }
-    for (FileDeletionTask successor : successors) {
-      builder.addSuccessorIds(successor.taskId);
-    }
-
     try {
-      stateStore.storeDeletionTask(task.taskId, builder.build());
+      stateStore.storeDeletionTask(task.getTaskId(),
+          task.convertDeletionTaskToProto());
     } catch (IOException e) {
-      LOG.error("Unable to store deletion task " + task.taskId + " for "
-          + task.getSubDir(), e);
+      LOG.error("Unable to store deletion task " + task.getTaskId(), e);
     }
   }
 
-  private static class DeletionTaskRecoveryInfo {
-    FileDeletionTask task;
-    List<Integer> successorTaskIds;
-    long deletionTimestamp;
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    ThreadFactory tf = new ThreadFactoryBuilder()
+        .setNameFormat("DeletionService #%d")
+        .build();
+    if (conf != null) {
+      sched = new HadoopScheduledThreadPoolExecutor(
+          conf.getInt(YarnConfiguration.NM_DELETE_THREAD_COUNT,
+              YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT), tf);
+      debugDelay = conf.getInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0);
+    } else {
+      sched = new HadoopScheduledThreadPoolExecutor(
+          YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT, tf);
+    }
+    sched.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
+    sched.setKeepAliveTime(60L, SECONDS);
+    if (stateStore.canRecover()) {
+      recover(stateStore.loadDeletionServiceState());
+    }
+    super.serviceInit(conf);
+  }
 
-    public DeletionTaskRecoveryInfo(FileDeletionTask task,
-        List<Integer> successorTaskIds, long deletionTimestamp) {
-      this.task = task;
-      this.successorTaskIds = successorTaskIds;
-      this.deletionTimestamp = deletionTimestamp;
+  @Override
+  public void serviceStop() throws Exception {
+    if (sched != null) {
+      sched.shutdown();
+      boolean terminated = false;
+      try {
+        terminated = sched.awaitTermination(10, SECONDS);
+      } catch (InterruptedException e) { }
+      if (!terminated) {
+        sched.shutdownNow();
+      }
     }
+    super.serviceStop();
+  }
+
+  /**
+   * Determine if the service has completely stopped.
+   * Used only by unit tests
+   * @return true if service has completely stopped
+   */
+  @Private
+  public boolean isTerminated() {
+    return getServiceState() == STATE.STOPPED && sched.isTerminated();
   }
 }
\ No newline at end of file
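
One behavior the rewrite keeps from the old delete(): when
yarn.nodemanager.delete.debug-delay-sec is -1, delete() returns without
scheduling anything, and any non-negative value delays every deletion by that
many seconds. A hedged setup sketch (DeletionServiceSetupSketch is
hypothetical and the ContainerExecutor is assumed to be wired elsewhere):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;

public class DeletionServiceSetupSketch {
  DeletionService startDeletionService(ContainerExecutor exec) {
    Configuration conf = new YarnConfiguration();
    // Keep files around for an hour to ease debugging; -1 disables deletion.
    conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600);
    DeletionService delService = new DeletionService(exec);
    delService.init(conf);
    delService.start();
    return delService;
  }
}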

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
new file mode 100644
index 0000000..e47b3ee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTaskType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Utilities for converting from PB representations.
+ */
+public final class NMProtoUtils {
+
+  private static final Log LOG = LogFactory.getLog(NMProtoUtils.class);
+
+  private NMProtoUtils() { }
+
+  /**
+   * Convert the Protobuf representation into a {@link DeletionTask}.
+   *
+   * @param proto             the Protobuf representation for the DeletionTask
+   * @param deletionService   the {@link DeletionService}
+   * @return the converted {@link DeletionTask}
+   */
+  public static DeletionTask convertProtoToDeletionTask(
+      DeletionServiceDeleteTaskProto proto, DeletionService deletionService) {
+    int taskId = proto.getId();
+    if (proto.hasTaskType() && proto.getTaskType() != null) {
+      if (proto.getTaskType().equals(DeletionTaskType.FILE.name())) {
+        LOG.debug("Converting recovered FileDeletionTask");
+        return convertProtoToFileDeletionTask(proto, deletionService, taskId);
+      }
+    }
+    LOG.debug("Unable to get task type, trying FileDeletionTask");
+    return convertProtoToFileDeletionTask(proto, deletionService, taskId);
+  }
+
+  /**
+   * Convert the Protobuf representation into the {@link FileDeletionTask}.
+   *
+   * @param proto the Protobuf representation of the {@link FileDeletionTask}
+   * @param deletionService the {@link DeletionService}.
+   * @param taskId the ID of the {@link DeletionTask}.
+   * @return the populated {@link FileDeletionTask}.
+   */
+  public static FileDeletionTask convertProtoToFileDeletionTask(
+      DeletionServiceDeleteTaskProto proto, DeletionService deletionService,
+      int taskId) {
+    String user = proto.hasUser() ? proto.getUser() : null;
+    Path subdir = null;
+    if (proto.hasSubdir()) {
+      subdir = new Path(proto.getSubdir());
+    }
+    List<Path> basePaths = null;
+    List<String> basedirs = proto.getBasedirsList();
+    if (basedirs != null && basedirs.size() > 0) {
+      basePaths = new ArrayList<>(basedirs.size());
+      for (String basedir : basedirs) {
+        basePaths.add(new Path(basedir));
+      }
+    }
+    return new FileDeletionTask(taskId, deletionService, user, subdir,
+        basePaths);
+  }
+
+  /**
+   * Convert the Protobuf representation to the {@link DeletionTaskRecoveryInfo}
+   * representation.
+   *
+   * @param proto the Protobuf representation of the {@link DeletionTask}
+   * @param deletionService the {@link DeletionService}
+   * @return the populated {@link DeletionTaskRecoveryInfo}
+   */
+  public static DeletionTaskRecoveryInfo convertProtoToDeletionTaskRecoveryInfo(
+      DeletionServiceDeleteTaskProto proto, DeletionService deletionService) {
+    DeletionTask deletionTask =
+        NMProtoUtils.convertProtoToDeletionTask(proto, deletionService);
+    List<Integer> successorTaskIds = new ArrayList<>();
+    if (proto.getSuccessorIdsList() != null &&
+        !proto.getSuccessorIdsList().isEmpty()) {
+      successorTaskIds = proto.getSuccessorIdsList();
+    }
+    long deletionTimestamp = proto.getDeletionTime();
+    return new DeletionTaskRecoveryInfo(deletionTask, successorTaskIds,
+        deletionTimestamp);
+  }
+}
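
NMProtoUtils pairs with DeletionTask#convertDeletionTaskToProto() to
round-trip tasks through the NM state store. A minimal sketch of that round
trip (RoundTripSketch, the task ID, user, and path are illustrative only):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.NMProtoUtils;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

public class RoundTripSketch {
  DeletionTask roundTrip(DeletionService delService) {
    FileDeletionTask original = new FileDeletionTask(42, delService, "alice",
        new Path("/tmp/app_000001"), null);
    // What recordDeletionTaskInStateStore() persists...
    DeletionServiceDeleteTaskProto proto =
        original.convertDeletionTaskToProto();
    // ...and what recovery reconstructs from it.
    return NMProtoUtils.convertProtoToDeletionTask(proto, delService);
  }
}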

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java
new file mode 100644
index 0000000..006f49f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package containing classes for working with Protobuf.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java
new file mode 100644
index 0000000..c62ea02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery;
+
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+
+import java.util.List;
+
+/**
+ * Encapsulates the recovery info needed to recover a DeletionTask from the NM
+ * state store.
+ */
+public class DeletionTaskRecoveryInfo {
+
+  private DeletionTask task;
+  private List<Integer> successorTaskIds;
+  private long deletionTimestamp;
+
+  /**
+   * Information needed for recovering the DeletionTask.
+   *
+   * @param task the DeletionTask.
+   * @param successorTaskIds the IDs of the successor DeletionTasks.
+   * @param deletionTimestamp the scheduled time of deletion.
+   */
+  public DeletionTaskRecoveryInfo(DeletionTask task,
+      List<Integer> successorTaskIds, long deletionTimestamp) {
+    this.task = task;
+    this.successorTaskIds = successorTaskIds;
+    this.deletionTimestamp = deletionTimestamp;
+  }
+
+  /**
+   * Return the recovered DeletionTask.
+   *
+   * @return the recovered DeletionTask.
+   */
+  public DeletionTask getTask() {
+    return task;
+  }
+
+  /**
+   * Return the IDs of the successor DeletionTasks.
+   *
+   * @return the IDs of the successor DeletionTasks.
+   */
+  public List<Integer> getSuccessorTaskIds() {
+    return successorTaskIds;
+  }
+
+  /**
+   * Return the deletion timestamp.
+   *
+   * @return the deletion timestamp.
+   */
+  public long getDeletionTimestamp() {
+    return deletionTimestamp;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java
new file mode 100644
index 0000000..28d7f62
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package containing classes for recovering DeletionTasks.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
new file mode 100644
index 0000000..635d7a9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * DeletionTasks are supplied to the {@link DeletionService} for deletion.
+ */
+public abstract class DeletionTask implements Runnable {
+
+  static final Log LOG = LogFactory.getLog(DeletionTask.class);
+
+  public static final int INVALID_TASK_ID = -1;
+
+  private int taskId;
+  private String user;
+  private DeletionTaskType deletionTaskType;
+  private DeletionService deletionService;
+  private final AtomicInteger numberOfPendingPredecessorTasks;
+  private final Set<DeletionTask> successorTaskSet;
+  // By default all tasks will start as success=true; however if any of
+  // the dependent task fails then it will be marked as false in
+  // deletionTaskFinished().
+  private boolean success;
+
+  /**
+   * Deletion task with taskId and default values.
+   *
+   * @param taskId              the ID of the task, if previously set.
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user associated with the delete.
+   * @param deletionTaskType    the {@link DeletionTaskType}.
+   */
+  public DeletionTask(int taskId, DeletionService deletionService, String user,
+      DeletionTaskType deletionTaskType) {
+    this(taskId, deletionService, user, new AtomicInteger(0),
+        new HashSet<DeletionTask>(), deletionTaskType);
+  }
+
+  /**
+   * Deletion task with taskId and user supplied values.
+   *
+   * @param taskId              the ID of the task, if previously set.
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user associated with the delete.
+   * @param numberOfPendingPredecessorTasks  the number of pending tasks.
+   * @param successorTaskSet    the set of successor DeletionTasks.
+   * @param deletionTaskType    the {@link DeletionTaskType}.
+   */
+  public DeletionTask(int taskId, DeletionService deletionService, String user,
+      AtomicInteger numberOfPendingPredecessorTasks,
+      Set<DeletionTask> successorTaskSet, DeletionTaskType deletionTaskType) {
+    this.taskId = taskId;
+    this.deletionService = deletionService;
+    this.user = user;
+    this.numberOfPendingPredecessorTasks = numberOfPendingPredecessorTasks;
+    this.successorTaskSet = successorTaskSet;
+    this.deletionTaskType = deletionTaskType;
+    success = true;
+  }
+
+  /**
+   * Get the taskId for the DeletionTask.
+   *
+   * @return the taskId.
+   */
+  public int getTaskId() {
+    return taskId;
+  }
+
+  /**
+   * Set the taskId for the DeletionTask.
+   *
+   * @param taskId the taskId.
+   */
+  public void setTaskId(int taskId) {
+    this.taskId = taskId;
+  }
+
+  /**
+   * Get the user associated with the DeletionTask.
+   *
+   * @return the user name.
+   */
+  public String getUser() {
+    return user;
+  }
+
+  /**
+   * Get the {@link DeletionService} for this DeletionTask.
+   *
+   * @return the {@link DeletionService}.
+   */
+  public DeletionService getDeletionService() {
+    return deletionService;
+  }
+
+  /**
+   * Get the {@link DeletionTaskType} for this DeletionTask.
+   *
+   * @return the {@link DeletionTaskType}.
+   */
+  public DeletionTaskType getDeletionTaskType() {
+    return deletionTaskType;
+  }
+
+  /**
+   * Set the DeletionTask run status.
+   *
+   * @param success the status of the running DeletionTask.
+   */
+  public synchronized void setSuccess(boolean success) {
+    this.success = success;
+  }
+
+  /**
+   * Return the DeletionTask run status.
+   *
+   * @return the status of the running DeletionTask.
+   */
+  public synchronized boolean getSuccess() {
+    return this.success;
+  }
+
+  /**
+   * Return the list of successor tasks for the DeletionTask.
+   *
+   * @return the list of successor tasks.
+   */
+  public synchronized DeletionTask[] getSuccessorTasks() {
+    DeletionTask[] successors = new DeletionTask[successorTaskSet.size()];
+    return successorTaskSet.toArray(successors);
+  }
+
+  /**
+   * Convert the DeletionTask to the Protobuf representation for storing in the
+   * state store and recovery.
+   *
+   * @return the protobuf representation of the DeletionTask.
+   */
+  public abstract DeletionServiceDeleteTaskProto convertDeletionTaskToProto();
+
+  /**
+   * Add a dependent DeletionTask.
+   *
+   * If there is a dependency between tasks such that task2 and task3 can
+   * start only after task1 completes, then task2 and task3 should be
+   * defined as successor tasks of task1.
+   * Note: task dependencies must be defined prior to calling delete.
+   *
+   * @param successorTask the DeletionTask that depends on this DeletionTask.
+   */
+  public synchronized void addDeletionTaskDependency(
+      DeletionTask successorTask) {
+    if (successorTaskSet.add(successorTask)) {
+      successorTask.incrementAndGetPendingPredecessorTasks();
+    }
+  }
+
+  /**
+   * Increments and returns pending predecessor task count.
+   *
+   * @return the number of pending predecessor DeletionTasks.
+   */
+  public int incrementAndGetPendingPredecessorTasks() {
+    return numberOfPendingPredecessorTasks.incrementAndGet();
+  }
+
+  /**
+   * Decrements and returns pending predecessor task count.
+   *
+   * @return the number of pending predecessor DeletionTasks.
+   */
+  public int decrementAndGetPendingPredecessorTasks() {
+    return numberOfPendingPredecessorTasks.decrementAndGet();
+  }
+
+  /**
+   * Removes the DeletionTask from the state store and schedules or
+   * finishes successor tasks whose predecessors have all completed.
+   *
+   * This is called when:
+   * 1) Current deletion task ran and finished.
+   * 2) When directly called by predecessor task if one of the
+   * dependent tasks of it has failed marking its success = false.
+   */
+  synchronized void deletionTaskFinished() {
+    try {
+      NMStateStoreService stateStore = deletionService.getStateStore();
+      stateStore.removeDeletionTask(taskId);
+    } catch (IOException e) {
+      LOG.error("Unable to remove deletion task " + taskId
+          + " from state store", e);
+    }
+    Iterator<DeletionTask> successorTaskI = this.successorTaskSet.iterator();
+    while (successorTaskI.hasNext()) {
+      DeletionTask successorTask = successorTaskI.next();
+      if (!success) {
+        successorTask.setSuccess(success);
+      }
+      int count = successorTask.decrementAndGetPendingPredecessorTasks();
+      if (count == 0) {
+        if (successorTask.getSuccess()) {
+          successorTask.deletionService.delete(successorTask);
+        } else {
+          successorTask.deletionTaskFinished();
+        }
+      }
+    }
+  }
+
+  /**
+   * Return the Protobuf builder with the base DeletionTask attributes.
+   *
+   * @return the pre-populated Builder with the base attributes.
+   */
+  DeletionServiceDeleteTaskProto.Builder getBaseDeletionTaskProtoBuilder() {
+    DeletionServiceDeleteTaskProto.Builder builder =
+        DeletionServiceDeleteTaskProto.newBuilder();
+    builder.setId(getTaskId());
+    if (getUser() != null) {
+      builder.setUser(getUser());
+    }
+    builder.setDeletionTime(System.currentTimeMillis() +
+        TimeUnit.MILLISECONDS.convert(getDeletionService().getDebugDelay(),
+            TimeUnit.SECONDS));
+    for (DeletionTask successor : getSuccessorTasks()) {
+      builder.addSuccessorIds(successor.getTaskId());
+    }
+    return builder;
+  }
+}
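
The dependency contract documented on addDeletionTaskDependency() above, as a
short sketch: task2 and task3 must run only after task1, so they are
registered as task1's successors and only task1 is handed to delete();
deletionTaskFinished() then schedules the successors (DependencySketch and
the directory variables are hypothetical):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

public class DependencySketch {
  void deleteInOrder(DeletionService delService, String user,
      Path dir1, Path dir2, Path dir3) {
    FileDeletionTask task1 = new FileDeletionTask(delService, user, dir1, null);
    FileDeletionTask task2 = new FileDeletionTask(delService, user, dir2, null);
    FileDeletionTask task3 = new FileDeletionTask(delService, user, dir3, null);
    // task2 and task3 run only after task1 finishes successfully.
    task1.addDeletionTaskDependency(task2);
    task1.addDeletionTaskDependency(task3);
    delService.delete(task1);
  }
}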

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java
new file mode 100644
index 0000000..676c71b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+/**
+ * Available types of {@link DeletionTask}s.
+ */
+public enum DeletionTaskType {
+  FILE
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
new file mode 100644
index 0000000..fd07f16
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * {@link DeletionTask} handling the removal of files (and directories).
+ */
+public class FileDeletionTask extends DeletionTask implements Runnable {
+
+  private final Path subDir;
+  private final List<Path> baseDirs;
+  private static final FileContext lfs = getLfs();
+
+  private static FileContext getLfs() {
+    try {
+      return FileContext.getLocalFSFileContext();
+    } catch (UnsupportedFileSystemException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Construct a FileDeletionTask with the default INVALID_TASK_ID.
+   *
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user deleting the file.
+   * @param subDir              the subdirectory to delete.
+   * @param baseDirs            the base directories containing the subdir.
+   */
+  public FileDeletionTask(DeletionService deletionService, String user,
+      Path subDir, List<Path> baseDirs) {
+    this(INVALID_TASK_ID, deletionService, user, subDir, baseDirs);
+  }
+
+  /**
+   * Construct a FileDeletionTask with the specified taskId.
+   *
+   * @param taskId              the ID of the task, if previously set.
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user deleting the file.
+   * @param subDir              the subdirectory to delete.
+   * @param baseDirs            the base directories containing the subdir.
+   */
+  public FileDeletionTask(int taskId, DeletionService deletionService,
+      String user, Path subDir, List<Path> baseDirs) {
+    super(taskId, deletionService, user, DeletionTaskType.FILE);
+    this.subDir = subDir;
+    this.baseDirs = baseDirs;
+  }
+
+  /**
+   * Get the subdirectory to delete.
+   *
+   * @return the subDir for the FileDeletionTask.
+   */
+  public Path getSubDir() {
+    return this.subDir;
+  }
+
+  /**
+   * Get the base directories containing the subdirectory.
+   *
+   * @return the base directories for the FileDeletionTask.
+   */
+  public List<Path> getBaseDirs() {
+    return this.baseDirs;
+  }
+
+  /**
+   * Delete the specified file/directory as the specified user.
+   */
+  @Override
+  public void run() {
+    if (LOG.isDebugEnabled()) {
+      String msg = String.format("Running DeletionTask : %s", toString());
+      LOG.debug(msg);
+    }
+    boolean error = false;
+    if (null == getUser()) {
+      if (baseDirs == null || baseDirs.size() == 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("NM deleting absolute path : " + subDir);
+        }
+        try {
+          lfs.delete(subDir, true);
+        } catch (IOException e) {
+          error = true;
+          LOG.warn("Failed to delete " + subDir);
+        }
+      } else {
+        for (Path baseDir : baseDirs) {
+          Path del = subDir == null ? baseDir : new Path(baseDir, subDir);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("NM deleting path : " + del);
+          }
+          try {
+            lfs.delete(del, true);
+          } catch (IOException e) {
+            error = true;
+            LOG.warn("Failed to delete " + subDir);
+          }
+        }
+      }
+    } else {
+      try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(
+              "Deleting path: [" + subDir + "] as user: [" + getUser() + "]");
+        }
+        if (baseDirs == null || baseDirs.size() == 0) {
+          getDeletionService().getContainerExecutor().deleteAsUser(
+              new DeletionAsUserContext.Builder()
+              .setUser(getUser())
+              .setSubDir(subDir)
+              .build());
+        } else {
+          getDeletionService().getContainerExecutor().deleteAsUser(
+              new DeletionAsUserContext.Builder()
+              .setUser(getUser())
+              .setSubDir(subDir)
+              .setBasedirs(baseDirs.toArray(new Path[0]))
+              .build());
+        }
+      } catch (IOException|InterruptedException e) {
+        error = true;
+        LOG.warn("Failed to delete as user " + getUser(), e);
+      }
+    }
+    if (error) {
+      setSuccess(!error);
+    }
+    deletionTaskFinished();
+  }
+
+  /**
+   * Convert the FileDeletionTask to a String representation.
+   *
+   * @return String representation of the FileDeletionTask.
+   */
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FileDeletionTask :");
+    sb.append("  id : ").append(getTaskId());
+    sb.append("  user : ").append(getUser());
+    sb.append("  subDir : ").append(
+        subDir == null ? "null" : subDir.toString());
+    sb.append("  baseDir : ");
+    if (baseDirs == null || baseDirs.size() == 0) {
+      sb.append("null");
+    } else {
+      for (Path baseDir : baseDirs) {
+        sb.append(baseDir.toString()).append(',');
+      }
+    }
+    return sb.toString().trim();
+  }
+
+  /**
+   * Convert the FileDeletionTask to the Protobuf representation for storing
+   * in the state store and recovery.
+   *
+   * @return the protobuf representation of the FileDeletionTask.
+   */
+  public DeletionServiceDeleteTaskProto convertDeletionTaskToProto() {
+    DeletionServiceDeleteTaskProto.Builder builder =
+        getBaseDeletionTaskProtoBuilder();
+    builder.setTaskType(DeletionTaskType.FILE.name());
+    if (getSubDir() != null) {
+      builder.setSubdir(getSubDir().toString());
+    }
+    if (getBaseDirs() != null) {
+      for (Path dir : getBaseDirs()) {
+        builder.addBasedirs(dir.toString());
+      }
+    }
+    return builder.build();
+  }
+}
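
run() above resolves its target in two shapes: with no base dirs, subDir is
treated as an absolute path and removed as the NM process user; with base
dirs, each baseDir/subDir combination is removed, routed through
ContainerExecutor#deleteAsUser() when a user is set. A sketch of both shapes
(paths and the user name are illustrative):

import java.util.Arrays;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

public class FileDeletionShapesSketch {
  void schedule(DeletionService delService) {
    // Shape 1: absolute path, deleted as the NM process user (user == null).
    delService.delete(new FileDeletionTask(delService, null,
        new Path("/local/nmPrivate/app_000001"), null));
    // Shape 2: relative subDir removed under each base dir as "alice",
    // which goes through ContainerExecutor#deleteAsUser().
    delService.delete(new FileDeletionTask(delService, "alice",
        new Path("filecache/10"),
        Arrays.asList(new Path("/local/1"), new Path("/local/2"))));
  }
}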

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java
new file mode 100644
index 0000000..f1a3985
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package containing DeletionTasks for use with the DeletionService.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
index af34e92..47e6a55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent;
@@ -113,9 +114,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
     this.useLocalCacheDirectoryManager = useLocalCacheDirectoryManager;
     if (this.useLocalCacheDirectoryManager) {
       directoryManagers =
-          new ConcurrentHashMap<Path, LocalCacheDirectoryManager>();
+          new ConcurrentHashMap<>();
       inProgressLocalResourcesMap =
-          new ConcurrentHashMap<LocalResourceRequest, Path>();
+          new ConcurrentHashMap<>();
     }
     this.conf = conf;
     this.stateStore = stateStore;
@@ -393,7 +394,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
       return false;
     } else { // ResourceState is LOCALIZED or INIT
       if (ResourceState.LOCALIZED.equals(rsrc.getState())) {
-        delService.delete(getUser(), getPathToDelete(rsrc.getLocalPath()));
+        FileDeletionTask deletionTask = new FileDeletionTask(delService,
+            getUser(), getPathToDelete(rsrc.getLocalPath()), null);
+        delService.delete(deletionTask);
       }
       removeResource(rem.getRequest());
       LOG.info("Removed " + rsrc.getLocalPath() + " from localized cache");
@@ -488,7 +491,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
       LOG.warn("Directory " + uniquePath + " already exists, " +
           "try next one.");
       if (delService != null) {
-        delService.delete(getUser(), uniquePath);
+        FileDeletionTask deletionTask = new FileDeletionTask(delService,
+            getUser(), uniquePath, null);
+        delService.delete(deletionTask);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 663bad7..5bc0da7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -95,7 +95,6 @@ import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.Localize
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
-import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.DirsChangeListener;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
@@ -113,6 +112,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalCacheCleaner.LocalCacheCleanerStats;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent;
@@ -604,7 +604,9 @@ public class ResourceLocalizationService extends CompositeService
   private void submitDirForDeletion(String userName, Path dir) {
     try {
       lfs.getFileStatus(dir);
-      delService.delete(userName, dir, new Path[] {});
+      FileDeletionTask deletionTask = new FileDeletionTask(delService, userName,
+          dir, null);
+      delService.delete(deletionTask);
     } catch (UnsupportedFileSystemException ue) {
       LOG.warn("Local dir " + dir + " is an unsupported filesystem", ue);
     } catch (IOException ie) {
@@ -1234,10 +1236,13 @@ public class ResourceLocalizationService extends CompositeService
           event.getResource().unlock();
         }
         if (!paths.isEmpty()) {
-          delService.delete(context.getUser(),
-              null, paths.toArray(new Path[paths.size()]));
+          FileDeletionTask deletionTask = new FileDeletionTask(delService,
+              context.getUser(), null, paths);
+          delService.delete(deletionTask);
         }
-        delService.delete(null, nmPrivateCTokensPath, new Path[] {});
+        FileDeletionTask deletionTask = new FileDeletionTask(delService, null,
+            nmPrivateCTokensPath, null);
+        delService.delete(deletionTask);
       }
     }
 
@@ -1456,7 +1461,9 @@ public class ResourceLocalizationService extends CompositeService
         String appName = fileStatus.getPath().getName();
         if (appName.matches("^application_\\d+_\\d+_DEL_\\d+$")) {
           LOG.info("delete app log dir," + appName);
-          del.delete(null, fileStatus.getPath());
+          FileDeletionTask deletionTask = new FileDeletionTask(del, null,
+              fileStatus.getPath(), null);
+          del.delete(deletionTask);
         }
       }
     }
@@ -1516,7 +1523,9 @@ public class ResourceLocalizationService extends CompositeService
               ||
               status.getPath().getName()
                   .matches(".*" + ContainerLocalizer.FILECACHE + "_DEL_.*")) {
-            del.delete(null, status.getPath(), new Path[] {});
+            FileDeletionTask deletionTask = new FileDeletionTask(del, null,
+                status.getPath(), null);
+            del.delete(deletionTask);
           }
         } catch (IOException ex) {
           // Do nothing, just give the warning
@@ -1530,24 +1539,25 @@ public class ResourceLocalizationService extends CompositeService
   private void cleanUpFilesPerUserDir(FileContext lfs, DeletionService del,
       Path userDirPath) throws IOException {
     RemoteIterator<FileStatus> userDirStatus = lfs.listStatus(userDirPath);
-    FileDeletionTask dependentDeletionTask =
-        del.createFileDeletionTask(null, userDirPath, new Path[] {});
+    FileDeletionTask dependentDeletionTask = new FileDeletionTask(del, null,
+        userDirPath, new ArrayList<Path>());
     if (userDirStatus != null && userDirStatus.hasNext()) {
       List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
       while (userDirStatus.hasNext()) {
         FileStatus status = userDirStatus.next();
         String owner = status.getOwner();
-        FileDeletionTask deletionTask =
-            del.createFileDeletionTask(owner, null,
-              new Path[] { status.getPath() });
-        deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
+        List<Path> pathList = new ArrayList<>();
+        pathList.add(status.getPath());
+        FileDeletionTask deletionTask = new FileDeletionTask(del, owner, null,
+            pathList);
+        deletionTask.addDeletionTaskDependency(dependentDeletionTask);
         deletionTasks.add(deletionTask);
       }
       for (FileDeletionTask task : deletionTasks) {
-        del.scheduleFileDeletionTask(task);
+        del.delete(task);
       }
     } else {
-      del.scheduleFileDeletionTask(dependentDeletionTask);
+      del.delete(dependentDeletionTask);
     }
   }
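
The dependency chaining in cleanUpFilesPerUserDir keeps the old ordering contract under the new names: each per-owner task registers the parent-directory task via addDeletionTaskDependency, so the parent deletion is only attempted once the children have run, matching the former addFileDeletionTaskDependency behavior. A sketch under that reading (the owner string and paths are invented; in the real code the owner comes from the FileStatus):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

    public class ChainedDeletionSketch {
      static void deleteUserDir(DeletionService del, Path userDir, List<Path> files) {
        // Parent task: remove the (by then empty) user dir itself.
        FileDeletionTask parent =
            new FileDeletionTask(del, null, userDir, new ArrayList<Path>());
        List<FileDeletionTask> children = new ArrayList<>();
        for (Path f : files) {
          List<Path> one = new ArrayList<>();
          one.add(f);
          // "hdfs" stands in for the owner read from the file status.
          FileDeletionTask child = new FileDeletionTask(del, "hdfs", null, one);
          child.addDeletionTaskDependency(parent);
          children.add(child);
        }
        // Only the children are submitted; the parent fires via the dependency.
        for (FileDeletionTask t : children) {
          del.delete(t);
        }
      }
    }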
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index f465534..0d9e686 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
@@ -258,19 +260,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       return;
     }
 
-    if (UserGroupInformation.isSecurityEnabled()) {
-      Credentials systemCredentials =
-          context.getSystemCredentialsForApps().get(appId);
-      if (systemCredentials != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding new framework-token for " + appId
-              + " for log-aggregation: " + systemCredentials.getAllTokens()
-              + "; userUgi=" + userUgi);
-        }
-        // this will replace old token
-        userUgi.addCredentials(systemCredentials);
-      }
-    }
+    addCredentials();
 
     // Create a set of Containers whose logs will be uploaded in this cycle.
     // It includes:
@@ -332,9 +322,12 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
             finishedContainers.contains(container));
         if (uploadedFilePathsInThisCycle.size() > 0) {
           uploadedLogsInThisCycle = true;
-          this.delService.delete(this.userUgi.getShortUserName(), null,
-              uploadedFilePathsInThisCycle
-                  .toArray(new Path[uploadedFilePathsInThisCycle.size()]));
+          List<Path> uploadedFilePathsInThisCycleList = new ArrayList<>();
+          uploadedFilePathsInThisCycleList.addAll(uploadedFilePathsInThisCycle);
+          DeletionTask deletionTask = new FileDeletionTask(delService,
+              this.userUgi.getShortUserName(), null,
+              uploadedFilePathsInThisCycleList);
+          delService.delete(deletionTask);
         }
 
         // This container is finished, and all its logs have been uploaded,
@@ -352,11 +345,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       }
 
       long currentTime = System.currentTimeMillis();
-      final Path renamedPath = this.rollingMonitorInterval <= 0
-              ? remoteNodeLogFileForApp : new Path(
-                remoteNodeLogFileForApp.getParent(),
-                remoteNodeLogFileForApp.getName() + "_"
-                    + currentTime);
+      final Path renamedPath = getRenamedPath(currentTime);
 
       final boolean rename = uploadedLogsInThisCycle;
       try {
@@ -396,6 +385,28 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     }
   }
 
+  private Path getRenamedPath(long currentTime) {
+    return this.rollingMonitorInterval <= 0 ? remoteNodeLogFileForApp
+        : new Path(remoteNodeLogFileForApp.getParent(),
+        remoteNodeLogFileForApp.getName() + "_" + currentTime);
+  }
+
+  private void addCredentials() {
+    if (UserGroupInformation.isSecurityEnabled()) {
+      Credentials systemCredentials =
+          context.getSystemCredentialsForApps().get(appId);
+      if (systemCredentials != null) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Adding new framework-token for " + appId
+              + " for log-aggregation: " + systemCredentials.getAllTokens()
+              + "; userUgi=" + userUgi);
+        }
+        // this will replace old token
+        userUgi.addCredentials(systemCredentials);
+      }
+    }
+  }
+
   @VisibleForTesting
   protected LogWriter createLogWriter() {
     return new LogWriter();
@@ -561,8 +572,11 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     }
 
     if (localAppLogDirs.size() > 0) {
-      this.delService.delete(this.userUgi.getShortUserName(), null,
-        localAppLogDirs.toArray(new Path[localAppLogDirs.size()]));
+      List<Path> localAppLogDirsList = new ArrayList<>();
+      localAppLogDirsList.addAll(localAppLogDirs);
+      DeletionTask deletionTask = new FileDeletionTask(delService,
+          this.userUgi.getShortUserName(), null, localAppLogDirsList);
+      this.delService.delete(deletionTask);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
index 2901743..9961748 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
@@ -247,8 +248,10 @@ public class NonAggregatingLogHandler extends AbstractService implements
         new ApplicationEvent(this.applicationId,
           ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
       if (localAppLogDirs.size() > 0) {
-        NonAggregatingLogHandler.this.delService.delete(user, null,
-          (Path[]) localAppLogDirs.toArray(new Path[localAppLogDirs.size()]));
+        FileDeletionTask deletionTask = new FileDeletionTask(
+            NonAggregatingLogHandler.this.delService, user, null,
+            localAppLogDirs);
+        NonAggregatingLogHandler.this.delService.delete(deletionTask);
       }
       try {
         NonAggregatingLogHandler.this.stateStore.removeLogDeleter(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
index 7831711..7212953 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
@@ -41,6 +41,7 @@ message DeletionServiceDeleteTaskProto {
   optional int64 deletionTime = 4;
   repeated string basedirs = 5;
   repeated int32 successorIds = 6;
+  optional string taskType = 7;
 }
 
 message LocalizedResourceProto {
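
The new optional taskType field is what allows recovery to pick the right DeletionTask subclass; entries written by older NodeManagers simply lack it. A hedged sketch of what the deserialization side could look like (the recover() helper and the defaulting rule are assumptions, not part of this hunk, and the user/subdir accessors are assumed to follow the standard protobuf naming for the existing fields; FILE is the only type introduced by this patch):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
    import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

    public class RecoverySketch {
      static FileDeletionTask recover(DeletionService del,
          DeletionServiceDeleteTaskProto p) {
        // State-store entries written before this change carry no taskType;
        // assume FILE for them.
        String type = p.hasTaskType() ? p.getTaskType() : "FILE";
        if (!"FILE".equals(type)) {
          throw new IllegalArgumentException("Unknown deletion task type " + type);
        }
        Path subDir = p.hasSubdir() ? new Path(p.getSubdir()) : null;
        List<Path> baseDirs = new ArrayList<>();
        for (String dir : p.getBasedirsList()) {
          baseDirs.add(new Path(dir));
        }
        return new FileDeletionTask(del, p.hasUser() ? p.getUser() : null,
            subDir, baseDirs);
      }
    }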

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
index 2e0bbe0..87f4a1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
@@ -33,13 +33,14 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
 import org.junit.AfterClass;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+
 public class TestDeletionService {
 
   private static final FileContext lfs = getLfs();
@@ -123,8 +124,9 @@ public class TestDeletionService {
     del.start();
     try {
       for (Path p : dirs) {
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
-            p, null);
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, null);
+        del.delete(deletionTask);
       }
 
       int msecToWait = 20 * 1000;
@@ -159,8 +161,10 @@ public class TestDeletionService {
       del.start();
       for (Path p : content) {
         assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
-            p, baseDirs.toArray(new Path[4]));
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
+            baseDirs);
+        del.delete(deletionTask);
       }
 
       int msecToWait = 20 * 1000;
@@ -196,8 +200,9 @@ public class TestDeletionService {
       del.init(conf);
       del.start();
       for (Path p : dirs) {
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
-            null);
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, null);
+        del.delete(deletionTask);
       }
       int msecToWait = 20 * 1000;
       for (Path p : dirs) {
@@ -220,7 +225,9 @@ public class TestDeletionService {
     try {
       del.init(conf);
       del.start();
-      del.delete("dingo", new Path("/does/not/exist"));
+      FileDeletionTask deletionTask = new FileDeletionTask(del, "dingo",
+          new Path("/does/not/exist"), null);
+      del.delete(deletionTask);
     } finally {
       del.stop();
     }
@@ -247,18 +254,20 @@ public class TestDeletionService {
       // first we will try to delete sub directories which are present. This
       // should then trigger parent directory to be deleted.
       List<Path> subDirs = buildDirs(r, dirs.get(0), 2);
-      
+
       FileDeletionTask dependentDeletionTask =
-          del.createFileDeletionTask(null, dirs.get(0), new Path[] {});
+          new FileDeletionTask(del, null, dirs.get(0), new ArrayList<Path>());
       List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
       for (Path subDir : subDirs) {
+        List<Path> subDirList = new ArrayList<>();
+        subDirList.add(subDir);
         FileDeletionTask deletionTask =
-            del.createFileDeletionTask(null, null, new Path[] { subDir });
-        deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
+            new FileDeletionTask(del, null, dirs.get(0), subDirList);
+        deletionTask.addDeletionTaskDependency(dependentDeletionTask);
         deletionTasks.add(deletionTask);
       }
       for (FileDeletionTask task : deletionTasks) {
-        del.scheduleFileDeletionTask(task);
+        del.delete(task);
       }
 
       int msecToWait = 20 * 1000;
@@ -274,19 +283,21 @@ public class TestDeletionService {
       subDirs = buildDirs(r, dirs.get(1), 2);
       subDirs.add(new Path(dirs.get(1), "absentFile"));
       
-      dependentDeletionTask =
-          del.createFileDeletionTask(null, dirs.get(1), new Path[] {});
+      dependentDeletionTask = new FileDeletionTask(del, null, dirs.get(1),
+          new ArrayList<Path>());
       deletionTasks = new ArrayList<FileDeletionTask>();
       for (Path subDir : subDirs) {
-        FileDeletionTask deletionTask =
-            del.createFileDeletionTask(null, null, new Path[] { subDir });
-        deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
+        List<Path> subDirList = new ArrayList<>();
+        subDirList.add(subDir);
+        FileDeletionTask deletionTask = new FileDeletionTask(del, null, null,
+            subDirList);
+        deletionTask.addDeletionTaskDependency(dependentDeletionTask);
         deletionTasks.add(deletionTask);
       }
       // marking one of the tasks as a failure.
       deletionTasks.get(2).setSuccess(false);
       for (FileDeletionTask task : deletionTasks) {
-        del.scheduleFileDeletionTask(task);
+        del.delete(task);
       }
 
       msecToWait = 20 * 1000;
@@ -327,8 +338,10 @@ public class TestDeletionService {
       del.start();
       for (Path p : content) {
         assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
-            p, baseDirs.toArray(new Path[4]));
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
+            baseDirs);
+        del.delete(deletionTask);
       }
 
       // restart the deletion service
@@ -341,8 +354,10 @@ public class TestDeletionService {
       // verify paths are still eventually deleted
       int msecToWait = 10 * 1000;
       for (Path p : baseDirs) {
+        System.out.println("TEST Basedir: " + p.getName());
         for (Path q : content) {
           Path fp = new Path(p, q);
+          System.out.println("TEST Path: " + fp.toString());
           while (msecToWait > 0 && lfs.util().exists(fp)) {
             Thread.sleep(100);
             msecToWait -= 100;




[29/50] [abbrv] hadoop git commit: HDFS-11905. Fix license header inconsistency in hdfs. Contributed by Yeliang Cang.

Posted by xg...@apache.org.
HDFS-11905. Fix license header inconsistency in hdfs. Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff310355
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff310355
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff310355

Branch: refs/heads/YARN-5734
Commit: ff3103556b7cb19933f84f773e3712ef05591bc4
Parents: 0dcf843
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Fri Jun 2 00:28:33 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Fri Jun 2 00:28:33 2017 +0800

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff310355/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index b0504f0..9f4df70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -1,6 +1,4 @@
-/*
- * UpgradeUtilities.java
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information




[33/50] [abbrv] hadoop git commit: HDFS-11383. Intern strings in BlockLocation and ExtendedBlock. Contributed by Misha Dmitriev.

Posted by xg...@apache.org.
HDFS-11383. Intern strings in BlockLocation and ExtendedBlock. Contributed by Misha Dmitriev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7101477e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7101477e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7101477e

Branch: refs/heads/YARN-5734
Commit: 7101477e4726a70ab0eab57c2d4480a04446a8dc
Parents: 219f4c1
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Jun 1 15:20:18 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Jun 1 15:20:18 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/BlockLocation.java     | 21 +++++------
 .../org/apache/hadoop/util/StringInterner.java  | 37 ++++++++++----------
 .../hadoop/hdfs/protocol/ExtendedBlock.java     | 16 +++++----
 3 files changed, 40 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7101477e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index b8cad3a..591febf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -22,6 +22,7 @@ import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.StringInterner;
 
 /**
  * Represents the network location of a block, information about the hosts
@@ -114,27 +115,27 @@ public class BlockLocation implements Serializable {
     if (names == null) {
       this.names = EMPTY_STR_ARRAY;
     } else {
-      this.names = names;
+      this.names = StringInterner.internStringsInArray(names);
     }
     if (hosts == null) {
       this.hosts = EMPTY_STR_ARRAY;
     } else {
-      this.hosts = hosts;
+      this.hosts = StringInterner.internStringsInArray(hosts);
     }
     if (cachedHosts == null) {
       this.cachedHosts = EMPTY_STR_ARRAY;
     } else {
-      this.cachedHosts = cachedHosts;
+      this.cachedHosts = StringInterner.internStringsInArray(cachedHosts);
     }
     if (topologyPaths == null) {
       this.topologyPaths = EMPTY_STR_ARRAY;
     } else {
-      this.topologyPaths = topologyPaths;
+      this.topologyPaths = StringInterner.internStringsInArray(topologyPaths);
     }
     if (storageIds == null) {
       this.storageIds = EMPTY_STR_ARRAY;
     } else {
-      this.storageIds = storageIds;
+      this.storageIds = StringInterner.internStringsInArray(storageIds);
     }
     if (storageTypes == null) {
       this.storageTypes = EMPTY_STORAGE_TYPE_ARRAY;
@@ -238,7 +239,7 @@ public class BlockLocation implements Serializable {
     if (hosts == null) {
       this.hosts = EMPTY_STR_ARRAY;
     } else {
-      this.hosts = hosts;
+      this.hosts = StringInterner.internStringsInArray(hosts);
     }
   }
 
@@ -249,7 +250,7 @@ public class BlockLocation implements Serializable {
     if (cachedHosts == null) {
       this.cachedHosts = EMPTY_STR_ARRAY;
     } else {
-      this.cachedHosts = cachedHosts;
+      this.cachedHosts = StringInterner.internStringsInArray(cachedHosts);
     }
   }
 
@@ -260,7 +261,7 @@ public class BlockLocation implements Serializable {
     if (names == null) {
       this.names = EMPTY_STR_ARRAY;
     } else {
-      this.names = names;
+      this.names = StringInterner.internStringsInArray(names);
     }
   }
 
@@ -271,7 +272,7 @@ public class BlockLocation implements Serializable {
     if (topologyPaths == null) {
       this.topologyPaths = EMPTY_STR_ARRAY;
     } else {
-      this.topologyPaths = topologyPaths;
+      this.topologyPaths = StringInterner.internStringsInArray(topologyPaths);
     }
   }
 
@@ -279,7 +280,7 @@ public class BlockLocation implements Serializable {
     if (storageIds == null) {
       this.storageIds = EMPTY_STR_ARRAY;
     } else {
-      this.storageIds = storageIds;
+      this.storageIds = StringInterner.internStringsInArray(storageIds);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7101477e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
index d74f810..028e49a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
@@ -25,8 +25,9 @@ import com.google.common.collect.Interner;
 import com.google.common.collect.Interners;
 
 /**
- * Provides equivalent behavior to String.intern() to optimize performance, 
- * whereby does not consume memory in the permanent generation.
+ * Provides string interning utility methods. For weak interning,
+ * we use the standard String.intern() call, that performs very well
+ * (no problems with PermGen overflowing, etc.) starting from JDK 7.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -35,20 +36,9 @@ public class StringInterner {
   /**
    * Retains a strong reference to each string instance it has interned.
    */
-  private final static Interner<String> strongInterner;
-  
-  /**
-   * Retains a weak reference to each string instance it has interned. 
-   */
-  private final static Interner<String> weakInterner;
-  
-  
-  
-  static {
-    strongInterner = Interners.newStrongInterner();
-    weakInterner = Interners.newWeakInterner();
-  }
-  
+  private final static Interner<String> STRONG_INTERNER =
+      Interners.newStrongInterner();
+
   /**
    * Interns and returns a reference to the representative instance 
    * for any of a collection of string instances that are equal to each other.
@@ -62,7 +52,7 @@ public class StringInterner {
     if (sample == null) {
       return null;
     }
-    return strongInterner.intern(sample);
+    return STRONG_INTERNER.intern(sample);
   }
   
   /**
@@ -78,7 +68,18 @@ public class StringInterner {
     if (sample == null) {
       return null;
     }
-    return weakInterner.intern(sample);
+    return sample.intern();
+  }
+
+  /**
+   * Interns all the strings in the given array in place,
+   * returning the same array.
+   */
+  public static String[] internStringsInArray(String[] strings) {
+    for (int i = 0; i < strings.length; i++) {
+      strings[i] = weakIntern(strings[i]);
+    }
+    return strings;
   }
 
 }
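
The behavioral point of the weakIntern() change is easy to demonstrate: String.intern() now returns a canonical heap instance (no PermGen pressure since JDK 7), and the new array helper rewrites each slot in place. A small self-contained check (hostname invented):

    import org.apache.hadoop.util.StringInterner;

    public class InternDemo {
      public static void main(String[] args) {
        // Two distinct String objects with equal contents.
        String[] hosts = {
            new String("dn1.example.com"), new String("dn1.example.com")};
        StringInterner.internStringsInArray(hosts);
        // After interning in place, both slots share one canonical instance.
        System.out.println(hosts[0] == hosts[1]);  // prints true
      }
    }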

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7101477e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
index af24909..7939662 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -42,13 +43,13 @@ public class ExtendedBlock {
   }
 
   public ExtendedBlock(String poolId, Block b) {
-    this.poolId = poolId;
+    this.poolId = poolId != null ? poolId.intern() : null;
     this.block = b;
   }
 
   public ExtendedBlock(final String poolId, final long blkid, final long len,
       final long genstamp) {
-    this.poolId = poolId;
+    this.poolId = poolId != null ? poolId.intern() : null;
     block = new Block(blkid, len, genstamp);
   }
 
@@ -86,7 +87,7 @@ public class ExtendedBlock {
   }
 
   public void set(String poolId, Block blk) {
-    this.poolId = poolId;
+    this.poolId = poolId != null ? poolId.intern() : null;
     this.block = blk;
   }
 
@@ -107,13 +108,16 @@ public class ExtendedBlock {
       return false;
     }
     ExtendedBlock b = (ExtendedBlock)o;
-    return b.block.equals(block) && b.poolId.equals(poolId);
+    return b.block.equals(block) &&
+        (b.poolId != null ? b.poolId.equals(poolId) : poolId == null);
   }
 
   @Override // Object
   public int hashCode() {
-    int result = 31 + poolId.hashCode();
-    return (31 * result + block.hashCode());
+    return new HashCodeBuilder(31, 17).
+        append(poolId).
+        append(block).
+        toHashCode();
   }
 
   @Override // Object
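
The switch to HashCodeBuilder is not just style: the old 31 + poolId.hashCode() would throw NullPointerException for a null poolId, which the reworked constructors explicitly allow, whereas HashCodeBuilder folds nulls in safely. A standalone illustration with invented values (both seed constants must be odd and non-zero, as with the (31, 17) pair above):

    import org.apache.commons.lang.builder.HashCodeBuilder;

    public class NullSafeHashDemo {
      public static void main(String[] args) {
        int withPool = new HashCodeBuilder(31, 17)
            .append("BP-12345").append(42L).toHashCode();
        int nullPool = new HashCodeBuilder(31, 17)
            .append((Object) null).append(42L).toHashCode();  // no NPE
        System.out.println(withPool + " vs " + nullPool);
      }
    }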




[11/50] [abbrv] hadoop git commit: HDFS-11832. Switch leftover logs to slf4j format in BlockManager.java. Contributed by Hui Xu and Chen Liang.

Posted by xg...@apache.org.
HDFS-11832. Switch leftover logs to slf4j format in BlockManager.java. Contributed by Hui Xu and Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7f085d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7f085d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7f085d6

Branch: refs/heads/YARN-5734
Commit: a7f085d6bf499edf23e650a4f7211c53a442da0e
Parents: 6c6a7a5
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon May 29 17:30:23 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 29 17:30:23 2017 +0900

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    | 153 ++++++++-----------
 .../blockmanagement/InvalidateBlocks.java       |  17 +--
 2 files changed, 72 insertions(+), 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f085d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f0c12cd..42ee850 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -498,14 +498,13 @@ public class BlockManager implements BlockStatsMXBean {
 
     bmSafeMode = new BlockManagerSafeMode(this, namesystem, haEnabled, conf);
 
-    LOG.info("defaultReplication         = " + defaultReplication);
-    LOG.info("maxReplication             = " + maxReplication);
-    LOG.info("minReplication             = " + minReplication);
-    LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
-    LOG.info("redundancyRecheckInterval  = " + redundancyRecheckIntervalMs +
-        "ms");
-    LOG.info("encryptDataTransfer        = " + encryptDataTransfer);
-    LOG.info("maxNumBlocksToLog          = " + maxNumBlocksToLog);
+    LOG.info("defaultReplication         = {}", defaultReplication);
+    LOG.info("maxReplication             = {}", maxReplication);
+    LOG.info("minReplication             = {}", minReplication);
+    LOG.info("maxReplicationStreams      = {}", maxReplicationStreams);
+    LOG.info("redundancyRecheckInterval  = {}ms", redundancyRecheckIntervalMs);
+    LOG.info("encryptDataTransfer        = {}", encryptDataTransfer);
+    LOG.info("maxNumBlocksToLog          = {}", maxNumBlocksToLog);
   }
 
   private static BlockTokenSecretManager createBlockTokenSecretManager(
@@ -513,7 +512,8 @@ public class BlockManager implements BlockStatsMXBean {
     final boolean isEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
         DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
-    LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);
+    LOG.info("{} = {}", DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
+            isEnabled);
 
     if (!isEnabled) {
       if (UserGroupInformation.isSecurityEnabled()) {
@@ -534,12 +534,10 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
     final String encryptionAlgorithm = conf.get(
         DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
-    LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
-        + "=" + updateMin + " min(s), "
-        + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
-        + "=" + lifetimeMin + " min(s), "
-        + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
-        + "=" + encryptionAlgorithm);
+    LOG.info("{}={} min(s), {}={} min(s), {}={}",
+        DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, updateMin,
+        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, lifetimeMin,
+        DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, encryptionAlgorithm);
     
     String nsId = DFSUtil.getNamenodeNameServiceId(conf);
     boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);
@@ -692,8 +690,8 @@ public class BlockManager implements BlockStatsMXBean {
       Collection<DatanodeDescriptor> corruptNodes =
           corruptReplicas.getNodes(block);
       if (corruptNodes == null) {
-        LOG.warn(block.getBlockId() +
-            " is corrupt but has no associated node.");
+        LOG.warn("{} is corrupt but has no associated node.",
+                 block.getBlockId());
         continue;
       }
       int numNodesToFind = corruptNodes.size();
@@ -1156,9 +1154,9 @@ public class BlockManager implements BlockStatsMXBean {
     final int numCorruptNodes = numReplicas.corruptReplicas();
     final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk);
     if (numCorruptNodes != numCorruptReplicas) {
-      LOG.warn("Inconsistent number of corrupt replicas for "
-          + blk + " blockMap has " + numCorruptNodes
-          + " but corrupt replicas map has " + numCorruptReplicas);
+      LOG.warn("Inconsistent number of corrupt replicas for {}"
+          + " blockMap has {} but corrupt replicas map has {}",
+          blk, numCorruptNodes, numCorruptReplicas);
     }
 
     final int numNodes = blocksMap.numNodes(blk);
@@ -1232,7 +1230,7 @@ public class BlockManager implements BlockStatsMXBean {
           Collections.<LocatedBlock> emptyList(), null, false, feInfo, ecPolicy);
     } else {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
+        LOG.debug("blocks = {}", java.util.Arrays.asList(blocks));
       }
       final AccessMode mode = needBlockToken? BlockTokenIdentifier.AccessMode.READ: null;
       final List<LocatedBlock> locatedblocks = createLocatedBlockList(
@@ -1829,8 +1827,7 @@ public class BlockManager implements BlockStatsMXBean {
         numReplicas);
     if(srcNodes == null || srcNodes.length == 0) {
       // block can not be reconstructed from any node
-      LOG.debug("Block " + block + " cannot be reconstructed " +
-          "from any node");
+      LOG.debug("Block {} cannot be reconstructed from any node", block);
       return null;
     }
 
@@ -2272,11 +2269,9 @@ public class BlockManager implements BlockStatsMXBean {
       removeBlock(b);
     }
     if (trackBlockCounts) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adjusting safe-mode totals for deletion."
-            + "decreasing safeBlocks by " + numRemovedSafe
-            + ", totalBlocks by " + numRemovedComplete);
-      }
+      LOG.debug("Adjusting safe-mode totals for deletion."
+          + "decreasing safeBlocks by {}, totalBlocks by {}",
+          numRemovedSafe, numRemovedComplete);
       bmSafeMode.adjustBlockTotals(-numRemovedSafe, -numRemovedComplete);
     }
   }
@@ -2440,18 +2435,14 @@ public class BlockManager implements BlockStatsMXBean {
 
         BlockInfo bi = getStoredBlock(b);
         if (bi == null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
-                "Postponed mis-replicated block " + b + " no longer found " +
-                "in block map.");
-          }
+          LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
+              "Postponed mis-replicated block {} no longer found " +
+              "in block map.", b);
           continue;
         }
         MisReplicationResult res = processMisReplicatedBlock(bi);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
-              "Re-scanned block " + b + ", result is " + res);
-        }
+        LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
+            "Re-scanned block {}, result is {}", b, res);
         if (res == MisReplicationResult.POSTPONE) {
           rescannedMisreplicatedBlocks.add(b);
         }
@@ -2461,10 +2452,9 @@ public class BlockManager implements BlockStatsMXBean {
       rescannedMisreplicatedBlocks.clear();
       long endSize = postponedMisreplicatedBlocks.size();
       namesystem.writeUnlock();
-      LOG.info("Rescan of postponedMisreplicatedBlocks completed in " +
-          (Time.monotonicNow() - startTime) + " msecs. " +
-          endSize + " blocks are left. " +
-          (startSize - endSize) + " blocks were removed.");
+      LOG.info("Rescan of postponedMisreplicatedBlocks completed in {}" +
+          " msecs. {} blocks are left. {} blocks were removed.",
+          (Time.monotonicNow() - startTime), endSize, (startSize - endSize));
     }
   }
   
@@ -2603,9 +2593,9 @@ public class BlockManager implements BlockStatsMXBean {
       ReplicaState reportedState = iblk.getState();
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Initial report of block " + iblk.getBlockName()
-            + " on " + storageInfo.getDatanodeDescriptor() + " size " +
-            iblk.getNumBytes() + " replicaState = " + reportedState);
+        LOG.debug("Initial report of block {} on {} size {} replicaState = {}",
+            iblk.getBlockName(), storageInfo.getDatanodeDescriptor(),
+            iblk.getNumBytes(), reportedState);
       }
       if (shouldPostponeBlocksFromFuture && isGenStampInFuture(iblk)) {
         queueReportedBlock(storageInfo, iblk, reportedState,
@@ -2684,11 +2674,8 @@ public class BlockManager implements BlockStatsMXBean {
 
       ReplicaState reportedState = replica.getState();
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Reported block " + replica
-                  + " on " + dn + " size " + replica.getNumBytes()
-                  + " replicaState = " + reportedState);
-      }
+      LOG.debug("Reported block {} on {} size {} replicaState = {}",
+          replica, dn, replica.getNumBytes(), reportedState);
 
       if (shouldPostponeBlocksFromFuture
           && isGenStampInFuture(replica)) {
@@ -2754,9 +2741,7 @@ public class BlockManager implements BlockStatsMXBean {
     BlockUCState ucState = storedBlock.getBlockUCState();
 
     // Block is on the NN
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("In memory blockUCState = " + ucState);
-    }
+    LOG.debug("In memory blockUCState = {}", ucState);
 
     // Ignore replicas already scheduled to be removed from the DN
     if (invalidateBlocks.contains(dn, replica)) {
@@ -2798,13 +2783,10 @@ public class BlockManager implements BlockStatsMXBean {
   private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
       ReplicaState reportedState, String reason) {
     assert shouldPostponeBlocksFromFuture;
-    
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Queueing reported block " + block +
-          " in state " + reportedState + 
-          " from datanode " + storageInfo.getDatanodeDescriptor() +
-          " for later processing because " + reason + ".");
-    }
+
+    LOG.debug("Queueing reported block {} in state {}" +
+            " from datanode {} for later processing because {}.",
+        block, reportedState, storageInfo.getDatanodeDescriptor(), reason);
     pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
   }
 
@@ -2825,9 +2807,7 @@ public class BlockManager implements BlockStatsMXBean {
   private void processQueuedMessages(Iterable<ReportedBlockInfo> rbis)
       throws IOException {
     for (ReportedBlockInfo rbi : rbis) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing previouly queued message " + rbi);
-      }
+      LOG.debug("Processing previouly queued message {}", rbi);
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request
         DatanodeStorageInfo storageInfo = rbi.getStorageInfo();
@@ -2852,8 +2832,8 @@ public class BlockManager implements BlockStatsMXBean {
       "block postponement.";
     int count = pendingDNMessages.count();
     if (count > 0) {
-      LOG.info("Processing " + count + " messages from DataNodes " +
-          "that were previously queued during standby state");
+      LOG.info("Processing {} messages from DataNodes " +
+          "that were previously queued during standby state", count);
     }
     processQueuedMessages(pendingDNMessages.takeAll());
     assert pendingDNMessages.count() == 0;
@@ -2935,9 +2915,8 @@ public class BlockManager implements BlockStatsMXBean {
           // the block report got a little bit delayed after the pipeline
           // closed. So, ignore this report, assuming we will get a
           // FINALIZED replica later. See HDFS-2791
-          LOG.info("Received an RBW replica for " + storedBlock +
-              " on " + dn + ": ignoring it, since it is " +
-              "complete with the same genstamp");
+          LOG.info("Received an RBW replica for {} on {}: ignoring it, since "
+                  + "it is complete with the same genstamp", storedBlock, dn);
           return null;
         } else {
           return new BlockToMarkCorrupt(new Block(reported), storedBlock,
@@ -2952,7 +2931,7 @@ public class BlockManager implements BlockStatsMXBean {
       + " for block: " + storedBlock + 
       " on " + dn + " size " + storedBlock.getNumBytes();
       // log here at WARN level since this is really a broken HDFS invariant
-      LOG.warn(msg);
+      LOG.warn("{}", msg);
       return new BlockToMarkCorrupt(new Block(reported), storedBlock, msg,
           Reason.INVALID_STATE);
     }
@@ -3131,9 +3110,9 @@ public class BlockManager implements BlockStatsMXBean {
     int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
     int numCorruptNodes = num.corruptReplicas();
     if (numCorruptNodes != corruptReplicasCount) {
-      LOG.warn("Inconsistent number of corrupt replicas for " +
-          storedBlock + ". blockMap has " + numCorruptNodes +
-          " but corrupt replicas map has " + corruptReplicasCount);
+      LOG.warn("Inconsistent number of corrupt replicas for {}" +
+          ". blockMap has {} but corrupt replicas map has {}",
+          storedBlock, numCorruptNodes, corruptReplicasCount);
     }
     if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileRedundancy)) {
       invalidateCorruptReplicas(storedBlock, reportedBlock, num);
@@ -3298,12 +3277,13 @@ public class BlockManager implements BlockStatsMXBean {
             / totalBlocks, 1.0);
 
         if (!blocksItr.hasNext()) {
-          LOG.info("Total number of blocks            = " + blocksMap.size());
-          LOG.info("Number of invalid blocks          = " + nrInvalid);
-          LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
-          LOG.info("Number of  over-replicated blocks = " + nrOverReplicated
-              + ((nrPostponed > 0) ? (" (" + nrPostponed + " postponed)") : ""));
-          LOG.info("Number of blocks being written    = " + nrUnderConstruction);
+          LOG.info("Total number of blocks            = {}", blocksMap.size());
+          LOG.info("Number of invalid blocks          = {}", nrInvalid);
+          LOG.info("Number of under-replicated blocks = {}", nrUnderReplicated);
+          LOG.info("Number of  over-replicated blocks = {}{}", nrOverReplicated,
+              ((nrPostponed > 0) ? (" (" + nrPostponed + " postponed)") : ""));
+          LOG.info("Number of blocks being written    = {}",
+                   nrUnderConstruction);
           NameNode.stateChangeLog
               .info("STATE* Replication Queue initialization "
                   + "scan for invalid, over- and under-replicated blocks "
@@ -3730,11 +3710,8 @@ public class BlockManager implements BlockStatsMXBean {
 
     final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Reported block " + block
-          + " on " + node + " size " + block.getNumBytes()
-          + " replicaState = " + reportedState);
-    }
+    LOG.debug("Reported block {} on {} size {} replicaState = {}",
+        block, node, block.getNumBytes(), reportedState);
 
     if (shouldPostponeBlocksFromFuture &&
         isGenStampInFuture(block)) {
@@ -3756,9 +3733,7 @@ public class BlockManager implements BlockStatsMXBean {
 
     BlockUCState ucState = storedBlock.getBlockUCState();
     // Block is on the NN
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("In memory blockUCState = " + ucState);
-    }
+    LOG.debug("In memory blockUCState = {}", ucState);
 
     // Ignore replicas already scheduled to be removed from the DN
     if(invalidateBlocks.contains(node, block)) {
@@ -4013,8 +3988,8 @@ public class BlockManager implements BlockStatsMXBean {
         numExtraRedundancy++;
       }
     }
-    LOG.info("Invalidated " + numExtraRedundancy
-        + " extra redundancy blocks on " + srcNode + " after it is in service");
+    LOG.info("Invalidated {} extra redundancy blocks on {} after "
+             + "it is in service", numExtraRedundancy, srcNode);
   }
 
   /**
@@ -4175,8 +4150,8 @@ public class BlockManager implements BlockStatsMXBean {
       try {
         DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
         if (dnDescriptor == null) {
-          LOG.warn("DataNode " + dn + " cannot be found with UUID " +
-              dn.getDatanodeUuid() + ", removing block invalidation work.");
+          LOG.warn("DataNode {} cannot be found with UUID {}" +
+              ", removing block invalidation work.", dn, dn.getDatanodeUuid());
           invalidateBlocks.remove(dn);
           return 0;
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f085d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index 694cc95..4e8bb58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -70,13 +70,14 @@ class InvalidateBlocks {
   }
 
   private void printBlockDeletionTime(final Logger log) {
-    log.info(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY
-        + " is set to " + DFSUtil.durationToString(pendingPeriodInMs));
+    log.info("{} is set to {}",
+        DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
+        DFSUtil.durationToString(pendingPeriodInMs));
     SimpleDateFormat sdf = new SimpleDateFormat("yyyy MMM dd HH:mm:ss");
     Calendar calendar = new GregorianCalendar();
     calendar.add(Calendar.SECOND, (int) (this.pendingPeriodInMs / 1000));
-    log.info("The block deletion will start around "
-        + sdf.format(calendar.getTime()));
+    log.info("The block deletion will start around {}",
+        sdf.format(calendar.getTime()));
   }
 
   /** @return the number of blocks to be invalidated . */
@@ -173,11 +174,9 @@ class InvalidateBlocks {
   synchronized List<Block> invalidateWork(final DatanodeDescriptor dn) {
     final long delay = getInvalidationDelay();
     if (delay > 0) {
-      if (BlockManager.LOG.isDebugEnabled()) {
-        BlockManager.LOG
-            .debug("Block deletion is delayed during NameNode startup. "
-                + "The deletion will start after " + delay + " ms.");
-      }
+      BlockManager.LOG
+          .debug("Block deletion is delayed during NameNode startup. "
+              + "The deletion will start after {} ms.", delay);
       return null;
     }
     final LightWeightHashSet<Block> set = node2blocks.get(dn);
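
The idiom this commit applies throughout: SLF4J '{}' placeholders defer building the message string until the logger has checked that the level is enabled, which is why the explicit isDebugEnabled() guards around plain messages can be dropped. A minimal self-contained sketch of the before/after (illustrative only, not part of the commit):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class ParameterizedLoggingSketch {
    private static final Logger LOG =
        LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

    void report(Object block, Object node, long numBytes, Object state) {
      // Before: the message string is concatenated even when DEBUG is
      // off, so a guard is needed to avoid the wasted work.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Reported block " + block + " on " + node + " size "
            + numBytes + " replicaState = " + state);
      }

      // After: placeholders are substituted only if DEBUG is enabled.
      // Argument expressions are still evaluated eagerly, so a guard
      // remains worthwhile when computing an argument is expensive.
      LOG.debug("Reported block {} on {} size {} replicaState = {}",
          block, node, numBytes, state);
    }
  }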




[17/50] [abbrv] hadoop git commit: YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler via Haibo Chen)

Posted by xg...@apache.org.
YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b4a6524
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b4a6524
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b4a6524

Branch: refs/heads/YARN-5734
Commit: 4b4a6524f2df3a891e9d5486ec39f7987766d84f
Parents: 91d6fe1
Author: Haibo Chen <ha...@apache.org>
Authored: Tue May 30 16:58:15 2017 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Tue May 30 16:58:15 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java      | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4a6524/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
index 0858a0b..ce5a513 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving;
  * event handlers based on event types.
  * 
  */
-@SuppressWarnings("rawtypes")
 @Public
 @Evolving
 public interface Dispatcher {
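
Background on the change: @SuppressWarnings("rawtypes") silences warnings about generic types used without their type arguments, and once a declaration no longer mentions any raw type the annotation suppresses nothing. A hypothetical sketch of the situation (illustrative types, not the Hadoop event API):

  interface Event { }

  interface EventHandler<T extends Event> {
    void handle(T event);
  }

  interface MiniDispatcher {
    // Every signature here is fully parameterized; nothing is a raw
    // type, so a class-level @SuppressWarnings("rawtypes") would be
    // dead weight, exactly as on Dispatcher above.
    EventHandler<Event> getEventHandler();
  }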




[12/50] [abbrv] hadoop git commit: MAPREDUCE-6887. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

Posted by xg...@apache.org.
MAPREDUCE-6887. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4015f86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4015f86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4015f86

Branch: refs/heads/YARN-5734
Commit: d4015f8628dd973c7433639451a9acc3e741d2a2
Parents: a7f085d
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 30 14:48:58 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 30 14:48:58 2017 +0900

----------------------------------------------------------------------
 .../hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java  | 2 +-
 .../main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java    | 4 ++--
 .../src/main/java/org/apache/hadoop/mapred/FileInputFormat.java  | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/JobClient.java        | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/JobInProgress.java    | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/JobTracker.java       | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/Task.java             | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/TaskLog.java          | 2 +-
 .../src/main/java/org/apache/hadoop/mapred/TaskStatus.java       | 4 ++--
 .../main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java | 2 +-
 .../src/main/java/org/apache/hadoop/mapreduce/Cluster.java       | 2 +-
 .../src/main/java/org/apache/hadoop/mapreduce/Job.java           | 4 ++--
 .../src/main/java/org/apache/hadoop/mapreduce/JobStatus.java     | 2 +-
 .../org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java   | 2 +-
 .../apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java    | 2 +-
 .../org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java   | 2 +-
 .../org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java | 2 +-
 .../java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java    | 2 +-
 .../java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java    | 2 +-
 .../java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java    | 2 +-
 .../org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java    | 2 +-
 .../src/test/java/org/apache/hadoop/RandomTextWriterJob.java     | 2 +-
 .../src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java        | 2 +-
 .../src/test/java/org/apache/hadoop/fs/TestDFSIO.java            | 4 ++--
 .../test/java/org/apache/hadoop/fs/slive/OperationOutput.java    | 2 +-
 .../src/test/java/org/apache/hadoop/fs/slive/PathFinder.java     | 2 +-
 .../java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java    | 2 +-
 .../src/test/java/org/apache/hadoop/mapred/MRBench.java          | 2 +-
 .../test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java | 2 +-
 .../java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java | 2 +-
 .../src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java   | 2 +-
 .../test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java  | 2 +-
 .../src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java  | 2 +-
 .../org/apache/hadoop/mapreduce/TimelineServicePerformance.java  | 2 +-
 .../main/java/org/apache/hadoop/examples/RandomTextWriter.java   | 2 +-
 .../src/main/java/org/apache/hadoop/examples/RandomWriter.java   | 2 +-
 .../main/java/org/apache/hadoop/examples/terasort/TeraGen.java   | 2 +-
 38 files changed, 42 insertions(+), 42 deletions(-)
----------------------------------------------------------------------
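
The rationale for the whole patch: a member enum is implicitly static (JLS §8.9), so the modifier never changes semantics. A two-declaration demonstration (illustrative only, not part of the commit):

  public class Outer {
    // These two nested enums are compiled identically; the explicit
    // modifier on the first is redundant, which is all this change
    // removes across the 38 files listed above.
    public static enum WithModifier { A, B }
    public enum WithoutModifier { A, B }
  }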


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index 58fd7b5..cb4f0c9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -102,7 +102,7 @@ public class ContainerLauncherImpl extends AbstractService implements
     }
   }
   
-  private static enum ContainerState {
+  private enum ContainerState {
     PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index b800d31..5fd66ac 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -114,7 +114,7 @@ public class MRApps extends Apps {
     throw new YarnRuntimeException("Unknown task type: "+ type.toString());
   }
 
-  public static enum TaskAttemptStateUI {
+  public enum TaskAttemptStateUI {
     NEW(
         new TaskAttemptState[] { TaskAttemptState.NEW,
             TaskAttemptState.STARTING }),
@@ -136,7 +136,7 @@ public class MRApps extends Apps {
     }
   }
 
-  public static enum TaskStateUI {
+  public enum TaskStateUI {
     RUNNING(
         new TaskState[]{TaskState.RUNNING}),
     PENDING(new TaskState[]{TaskState.SCHEDULED}),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 5803d60..3e0ea25 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -72,7 +72,7 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
     LogFactory.getLog(FileInputFormat.class);
   
   @Deprecated
-  public static enum Counter { 
+  public enum Counter {
     BYTES_READ
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
index 821c1e8..3932e58 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.util.Progressable;
 public abstract class FileOutputFormat<K, V> implements OutputFormat<K, V> {
 
   @Deprecated
-  public static enum Counter { 
+  public enum Counter {
     BYTES_WRITTEN
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index c43fc0a..2f8dd63 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -152,7 +152,7 @@ public class JobClient extends CLI implements AutoCloseable {
   public static final String MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
       "10000,6,60000,10"; // t1,n1,t2,n2,...
 
-  public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
+  public enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
   private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; 
   
   private int maxRetry = MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInProgress.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInProgress.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInProgress.java
index 1fd2eb3..a899801 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInProgress.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInProgress.java
@@ -30,7 +30,7 @@ public class JobInProgress {
    * @deprecated Provided for compatibility. Use {@link JobCounter} instead.
    */
   @Deprecated
-  public static enum Counter { 
+  public enum Counter {
     NUM_FAILED_MAPS, 
     NUM_FAILED_REDUCES,
     TOTAL_LAUNCHED_MAPS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobTracker.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobTracker.java
index 7999500..7cfdc02 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobTracker.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobTracker.java
@@ -28,7 +28,7 @@ public class JobTracker {
    * <code>State</code> is no longer used since M/R 2.x. It is kept in case
    * that M/R 1.x applications may still use it.
    */
-  public static enum State {
+  public enum State {
     INITIALIZING, RUNNING
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index c1ae0ab..b0347fd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -90,7 +90,7 @@ abstract public class Task implements Writable, Configurable {
    * @deprecated Provided for compatibility. Use {@link TaskCounter} instead.
    */
   @Deprecated
-  public static enum Counter { 
+  public enum Counter {
     MAP_INPUT_RECORDS, 
     MAP_OUTPUT_RECORDS,
     MAP_SKIPPED_RECORDS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
index bf838c2..059b0ad 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
@@ -358,7 +358,7 @@ public class TaskLog {
    * The filter for userlogs.
    */
   @InterfaceAudience.Private
-  public static enum LogName {
+  public enum LogName {
     /** Log on the stdout of the task. */
     STDOUT ("stdout"),
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
index a5c12de..bd12cd3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
@@ -45,12 +45,12 @@ public abstract class TaskStatus implements Writable, Cloneable {
   //enumeration for reporting current phase of a task.
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
-  public static enum Phase{STARTING, MAP, SHUFFLE, SORT, REDUCE, CLEANUP}
+  public enum Phase{STARTING, MAP, SHUFFLE, SORT, REDUCE, CLEANUP}
 
   // what state is the task in?
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
-  public static enum State {RUNNING, SUCCEEDED, FAILED, UNASSIGNED, KILLED, 
+  public enum State {RUNNING, SUCCEEDED, FAILED, UNASSIGNED, KILLED,
                             COMMIT_PENDING, FAILED_UNCLEAN, KILLED_UNCLEAN, PREEMPTED}
     
   private final TaskAttemptID taskid;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java
index 5a3ed5b..d196723 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java
@@ -68,7 +68,7 @@ class BinaryProtocol<K1 extends WritableComparable, V1 extends Writable,
    * The integer codes to represent the different messages. These must match
    * the C++ codes or massive confusion will result.
    */
-  private static enum MessageType { START(0),
+  private enum MessageType { START(0),
                                     SET_JOB_CONF(1),
                                     SET_INPUT_TYPES(2),
                                     RUN_MAP(3),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 3a2f982..fbf6806 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.security.token.Token;
 public class Cluster {
   
   @InterfaceStability.Evolving
-  public static enum JobTrackerStatus {INITIALIZING, RUNNING};
+  public enum JobTrackerStatus {INITIALIZING, RUNNING};
 
   private ClientProtocolProvider clientProtocolProvider;
   private ClientProtocol client;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 84d90de..2048768 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -79,7 +79,7 @@ public class Job extends JobContextImpl implements JobContext {
   private static final Log LOG = LogFactory.getLog(Job.class);
 
   @InterfaceStability.Evolving
-  public static enum JobState {DEFINE, RUNNING};
+  public enum JobState {DEFINE, RUNNING};
   private static final long MAX_JOBSTATUS_AGE = 1000 * 2;
   public static final String OUTPUT_FILTER = "mapreduce.client.output.filter";
   /** Key in mapred-*.xml that sets completionPollInvervalMillis */
@@ -104,7 +104,7 @@ public class Job extends JobContextImpl implements JobContext {
   public static final boolean DEFAULT_USE_WILDCARD_FOR_LIBJARS = true;
 
   @InterfaceStability.Evolving
-  public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
+  public enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
 
   static {
     ConfigUtil.loadResources();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
index 9ff75b9..d8b2321 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
@@ -52,7 +52,7 @@ public class JobStatus implements Writable, Cloneable {
   /**
    * Current state of the job 
    */
-  public static enum State {
+  public enum State {
     RUNNING(1),
     SUCCEEDED(2),
     FAILED(3),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 7ec882f..ebbc223 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -85,7 +85,7 @@ public abstract class FileInputFormat<K, V> extends InputFormat<K, V> {
   private static final double SPLIT_SLOP = 1.1;   // 10% slop
   
   @Deprecated
-  public static enum Counter { 
+  public enum Counter {
     BYTES_READ
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
index 4d5be68..f2b4734 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
@@ -52,7 +52,7 @@ public class ControlledJob {
   private static final Log LOG = LogFactory.getLog(ControlledJob.class);
 
   // A job will be in one of the following states
-  public static enum State {SUCCESS, WAITING, RUNNING, READY, FAILED,
+  public enum State {SUCCESS, WAITING, RUNNING, READY, FAILED,
                             DEPENDENT_FAILED}; 
   public static final String CREATE_DIR = "mapreduce.jobcontrol.createdir.ifnotexist";
   private State state;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
index e5399b5..4c7114a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
@@ -58,7 +58,7 @@ public class JobControl implements Runnable {
   private static final Log LOG = LogFactory.getLog(JobControl.class);
 
   // The thread can be in one of the following state
-  public static enum ThreadState {RUNNING, SUSPENDED,STOPPED, STOPPING, READY};
+  public enum ThreadState {RUNNING, SUSPENDED,STOPPED, STOPPING, READY};
 	
   private ThreadState runnerState;			// the thread state
 	

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
index 2c69542..c11f8d8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
@@ -61,7 +61,7 @@ public static final String COMPRESS_TYPE = "mapreduce.output.fileoutputformat.co
 public static final String OUTDIR = "mapreduce.output.fileoutputformat.outputdir";
 
   @Deprecated
-  public static enum Counter {
+  public enum Counter {
     BYTES_WRITTEN
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index c6889cb..6112fb5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -71,7 +71,7 @@ class Fetcher<K,V> extends Thread {
   private static final String FETCH_RETRY_AFTER_HEADER = "Retry-After";
 
   protected final Reporter reporter;
-  private static enum ShuffleErrors{IO_ERROR, WRONG_LENGTH, BAD_ID, WRONG_MAP,
+  private enum ShuffleErrors{IO_ERROR, WRONG_LENGTH, BAD_ID, WRONG_MAP,
                                     CONNECTION, WRONG_REDUCE}
   
   private final static String SHUFFLE_ERR_GRP_NAME = "Shuffle Errors";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
index dfb28de..948e02a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapHost.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 @InterfaceStability.Unstable
 public class MapHost {
   
-  public static enum State {
+  public enum State {
     IDLE,               // No map outputs available
     BUSY,               // Map outputs are being fetched
     PENDING,            // Known map outputs which need to be fetched

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java
index f4deab0..5d851a5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.ipc.Server;
 public class HSAuditLogger {
   private static final Log LOG = LogFactory.getLog(HSAuditLogger.class);
 
-  static enum Keys {
+  enum Keys {
     USER, OPERATION, TARGET, RESULT, IP, PERMISSIONS, DESCRIPTION
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 6b98e47..c7d6d19 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -87,7 +87,7 @@ public class HistoryFileManager extends AbstractService {
   private static final Log LOG = LogFactory.getLog(HistoryFileManager.class);
   private static final Log SUMMARY_LOG = LogFactory.getLog(JobSummary.class);
 
-  private static enum HistoryInfoState {
+  private enum HistoryInfoState {
     IN_INTERMEDIATE, IN_DONE, DELETED, MOVE_FAILED
   };
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java
index 2d341e9..1e3ee8c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/RandomTextWriterJob.java
@@ -53,7 +53,7 @@ public class RandomTextWriterJob extends Configured implements Tool {
   public static final String MIN_KEY = "mapreduce.randomtextwriter.minwordskey";
   public static final String MAX_KEY = "mapreduce.randomtextwriter.maxwordskey";
 
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
 
   public Job createJob(Configuration conf) throws IOException {
     long numBytesToWritePerMap = conf.getLong(BYTES_PER_MAP, 10 * 1024);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
index 8b3f4c8..91c3c26 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
@@ -162,7 +162,7 @@ public class JHLogAnalyzer {
     Configuration.addDefaultResource("hdfs-site.xml");
   }
 
-  static enum StatSeries {
+  enum StatSeries {
     STAT_ALL_SLOT_TIME
           (AccumulatingReducer.VALUE_TYPE_LONG + "allSlotTime"),
     STAT_FAILED_SLOT_TIME

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index 2eb4b29..34eac83 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -123,7 +123,7 @@ public class TestDFSIO implements Tool {
     Configuration.addDefaultResource("mapred-site.xml");
   }
 
-  private static enum TestType {
+  private enum TestType {
     TEST_TYPE_READ("read"),
     TEST_TYPE_WRITE("write"),
     TEST_TYPE_CLEANUP("cleanup"),
@@ -145,7 +145,7 @@ public class TestDFSIO implements Tool {
     }
   }
 
-  static enum ByteMultiple {
+  enum ByteMultiple {
     B(1L),
     KB(0x400L),
     MB(0x100000L),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationOutput.java
index 02584f1..3497abe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationOutput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationOutput.java
@@ -46,7 +46,7 @@ class OperationOutput {
   private static final String MEASUREMENT_SEP = "*";
   private static final String STRING_SEP = ";";
 
-  static enum OutputType {
+  enum OutputType {
     STRING, FLOAT, LONG, DOUBLE, INTEGER
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/PathFinder.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/PathFinder.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/PathFinder.java
index 080985b..2dfe2ef 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/PathFinder.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/PathFinder.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.fs.Path;
  */
 class PathFinder {
 
-  private static enum Type {
+  private enum Type {
     FILE, DIRECTORY
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
index f67ca1c..c0b2311 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/GenericMRLoadGenerator.java
@@ -232,7 +232,7 @@ public class GenericMRLoadGenerator extends Configured implements Tool {
     }
   }
 
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
 
   static class RandomMapOutput extends MapReduceBase
       implements Mapper<Text,Text,Text,Text> {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
index aee5003..5286e86 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
@@ -48,7 +48,7 @@ public class MRBench extends Configured implements Tool{
   private static Path INPUT_DIR = new Path(BASE_DIR, DEFAULT_INPUT_SUB);
   private static Path OUTPUT_DIR = new Path(BASE_DIR, DEFAULT_OUTPUT_SUB);
   
-  public static enum Order {RANDOM, ASCENDING, DESCENDING}; 
+  public enum Order {RANDOM, ASCENDING, DESCENDING};
   
   /**
    * Takes input format as text lines, runs some processing on it and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
index f551232..c607bfb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ThreadedMapBenchmark.java
@@ -63,7 +63,7 @@ public class ThreadedMapBenchmark extends Configured implements Tool {
                                             // (FACTOR * data_size) should 
                                             // result in only 1 spill
 
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
   
   /**
    * Generates random input data of given size with keys and values of given 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
index 7ee28df..66b7cb8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
@@ -241,7 +241,7 @@ public class GenericMRLoadGenerator extends Configured implements Tool {
     }
   }
 
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
 
   static class RandomMapOutput extends Mapper<Text,Text,Text,Text> {
     StringBuilder sentence = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java
index d6d0339..333ded9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/LargeSorter.java
@@ -67,7 +67,7 @@ public class LargeSorter extends Configured implements Tool {
   /**
    * User counters
    */
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
   
   /**
    * A custom input format that creates virtual inputs of a single string

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
index 6cb3211..0bf30c8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
@@ -97,7 +97,7 @@ public class RandomTextWriter extends Configured implements Tool {
   /**
    * User counters
    */
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
 
   static class RandomTextMapper extends Mapper<Text, Text, Text, Text> {
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
index 67c9ca8..b1093f0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
@@ -89,7 +89,7 @@ public class RandomWriter extends Configured implements Tool {
   /**
    * User counters
    */
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
   
   /**
    * A custom input format that creates virtual inputs of a single string

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TimelineServicePerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TimelineServicePerformance.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TimelineServicePerformance.java
index 7fa0444..b5b3d35 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TimelineServicePerformance.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TimelineServicePerformance.java
@@ -157,7 +157,7 @@ public class TimelineServicePerformance extends Configured implements Tool {
   /**
    * TimelineServer Performance counters
    */
-  static enum PerfCounters {
+  enum PerfCounters {
     TIMELINE_SERVICE_WRITE_TIME,
     TIMELINE_SERVICE_WRITE_COUNTER,
     TIMELINE_SERVICE_WRITE_FAILURES,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
index 6309ee6..7e6c099 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
@@ -97,7 +97,7 @@ public class RandomTextWriter extends Configured implements Tool {
   /**
    * User counters
    */
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
 
   static class RandomTextMapper extends Mapper<Text, Text, Text, Text> {
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
index 8f322b1..904b325 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
@@ -89,7 +89,7 @@ public class RandomWriter extends Configured implements Tool {
   /**
    * User counters
    */
-  static enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
+  enum Counters { RECORDS_WRITTEN, BYTES_WRITTEN }
   
   /**
    * A custom input format that creates virtual inputs of a single string

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4015f86/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index 7fbb22a..53bbdc4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -68,7 +68,7 @@ import org.apache.hadoop.util.ToolRunner;
 public class TeraGen extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(TeraGen.class);
 
-  public static enum Counters {CHECKSUM}
+  public enum Counters {CHECKSUM}
 
   /**
    * An input format that assigns ranges of longs to each mapper.




[21/50] [abbrv] hadoop git commit: YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely Novák via jeagles)

Posted by xg...@apache.org.
YARN-6497. Method length of ResourceManager#serviceInit() is too long (Gergely Novák via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbfed0e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbfed0e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbfed0e8

Branch: refs/heads/YARN-5734
Commit: cbfed0e82f57e96b8d5309e0613057963840554f
Parents: 13de636
Author: Jonathan Eagles <je...@yahoo-inc.com>
Authored: Wed May 31 10:18:09 2017 -0500
Committer: Jonathan Eagles <je...@yahoo-inc.com>
Committed: Wed May 31 10:18:42 2017 -0500

----------------------------------------------------------------------
 .../server/resourcemanager/ResourceManager.java | 27 ++++++++++----------
 1 file changed, 13 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbfed0e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8f2c121..f727f55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
@@ -238,13 +239,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
     rmContext.setConfigurationProvider(configurationProvider);
 
     // load core-site.xml
-    InputStream coreSiteXMLInputStream =
-        this.configurationProvider.getConfigurationInputStream(this.conf,
-            YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-    if (coreSiteXMLInputStream != null) {
-      this.conf.addResource(coreSiteXMLInputStream,
-          YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-    }
+    loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
 
     // Do refreshUserToGroupsMappings with loaded core-site.xml
     Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(this.conf)
@@ -257,13 +252,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
     ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf);
 
     // load yarn-site.xml
-    InputStream yarnSiteXMLInputStream =
-        this.configurationProvider.getConfigurationInputStream(this.conf,
-            YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-    if (yarnSiteXMLInputStream != null) {
-      this.conf.addResource(yarnSiteXMLInputStream,
-          YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
-    }
+    loadConfigurationXml(YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
 
     validateConfigs(this.conf);
     
@@ -339,6 +328,16 @@ public class ResourceManager extends CompositeService implements Recoverable {
     super.serviceInit(this.conf);
   }
 
+  private void loadConfigurationXml(String configurationFile)
+      throws YarnException, IOException {
+    InputStream configurationInputStream =
+        this.configurationProvider.getConfigurationInputStream(this.conf,
+            configurationFile);
+    if (configurationInputStream != null) {
+      this.conf.addResource(configurationInputStream, configurationFile);
+    }
+  }
+
   protected EmbeddedElector createEmbeddedElector() throws IOException {
     EmbeddedElector elector;
     curatorEnabled =




[44/50] [abbrv] hadoop git commit: HDFS-10816. TestComputeInvalidateWork#testDatanodeReRegistration fails due to race between test and replication monitor. Contributed by Eric Badger.

Posted by xg...@apache.org.
HDFS-10816. TestComputeInvalidateWork#testDatanodeReRegistration fails due to race between test and replication monitor. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4e203e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4e203e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4e203e0

Branch: refs/heads/YARN-5734
Commit: e4e203e0807fafc5dd765344d008e42bd51cc979
Parents: 7311015
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon Jun 5 15:17:43 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Mon Jun 5 15:18:27 2017 -0500

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/TestComputeInvalidateWork.java      | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4e203e0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index 033f4d1..61e69f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -63,6 +63,7 @@ public class TestComputeInvalidateWork {
     namesystem = cluster.getNamesystem();
     bm = namesystem.getBlockManager();
     nodes = bm.getDatanodeManager().getHeartbeatManager().getDatanodes();
+    BlockManagerTestUtil.stopRedundancyThread(bm);
     assertEquals(nodes.length, NUM_OF_DATANODES);
   }
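
The one-line fix works by stopping the block manager's redundancy (replication) monitor before the test body runs, so the background thread can no longer consume the invalidation work the test is counting. A generic sketch of that stop-the-worker-first pattern (all names below are hypothetical, not Hadoop APIs):

    import java.util.concurrent.atomic.AtomicBoolean;

    class BackgroundWorker implements Runnable {
      private final AtomicBoolean running = new AtomicBoolean(true);
      private final Thread thread = new Thread(this, "worker");

      void start() { thread.start(); }

      // Called from test setup: once this returns, no async mutation of
      // shared state can race with the test's assertions.
      void stopAndJoin() throws InterruptedException {
        running.set(false);
        thread.interrupt();
        thread.join();
      }

      @Override public void run() {
        while (running.get()) {
          // periodically recompute shared state, e.g. pending invalidations
          try { Thread.sleep(100); } catch (InterruptedException e) { return; }
        }
      }
    }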
 




[40/50] [abbrv] hadoop git commit: YARN-6458. Use yarn package manager to lock down dependency versions for new web UI. Contributed by Sreenath Somarajapuram.

Posted by xg...@apache.org.
YARN-6458. Use yarn package manager to lock down dependency versions for new web UI. Contributed by Sreenath Somarajapuram.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46f7e919
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46f7e919
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46f7e919

Branch: refs/heads/YARN-5734
Commit: 46f7e91980b2c7a0fec749b5ce833be49d86c061
Parents: 367da9b
Author: Sunil G <su...@apache.org>
Authored: Sun Jun 4 22:05:14 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Sun Jun 4 22:05:14 2017 +0530

----------------------------------------------------------------------
 .../hadoop-yarn/hadoop-yarn-ui/README.md        |   64 +
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml          |  152 +-
 .../hadoop-yarn-ui/src/main/webapp/.bowerrc     |    5 +-
 .../src/main/webapp/WEB-INF/wro.xml             |    9 +
 .../src/main/webapp/bower-shrinkwrap.json       |   66 +
 .../src/main/webapp/ember-cli-build.js          |   16 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |   10 +-
 .../hadoop-yarn-ui/src/main/webapp/yarn.lock    | 4983 ++++++++++++++++++
 8 files changed, 5253 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md
new file mode 100644
index 0000000..f67f351
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md
@@ -0,0 +1,64 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Yarn UI
+
+The Yarn UI is an Ember-based web app that visualizes the applications running on the Apache Hadoop YARN framework.
+
+## Configurations
+
+* You can point the UI to custom locations by setting the environment variables in `src/main/webapp/config/configs.env`
+
+## Development
+
+All the following commands must be run inside `src/main/webapp`.
+
+### Prerequisites
+
+You will need the following things properly installed on your computer.
+
+* Install [Yarn](https://yarnpkg.com) v0.21.3
+* Install [Bower](http://bower.io/) v1.7.7
+* Install all dependencies by running `yarn install` & `bower install`
+
+### Running UI
+
+* `yarn start`
+* Visit your app at [http://localhost:4200](http://localhost:4200).
+
+### Building
+
+* `yarn run build` (production)
+* Built files are placed in `dist/`
+
+### Adding new dependencies
+
+**Warning: Do not edit the _package.json_ or _bower.json_ files manually. This could make them fall out of sync with the respective lock or shrinkwrap files.**
+
+Yarn UI has replaced NPM with the Yarn package manager; hence Yarn is used to manage the dependencies defined in package.json.
+
+* Please use the Yarn and Bower command-line tools to add new dependencies. The tool versions must be the same as those defined in the Prerequisites section.
+* Once any dependency is added:
+  * If it is in package.json, make sure that the respective changes, and only those changes, are reflected in the yarn.lock file.
+  * If it is in bower.json, make sure that the respective changes, and only those changes, are reflected in the bower-shrinkwrap.json file.
+* Commands to add using the CLI tools:
+  * Yarn: `yarn add [package-name]`
+  * Bower: `bower install --save [package-name]`
+
+### Adding new routes (pages), controllers, components etc.
+
+* Use ember-cli blueprint generator - [Ember CLI](http://ember-cli.com/extending/#generators-and-blueprints)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index b920a32..34f7d0b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -26,15 +26,16 @@
   <artifactId>hadoop-yarn-ui</artifactId>
   <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN UI</name>
-  <packaging>${packaging.type}</packaging>
+  <packaging>${packagingType}</packaging>
 
   <properties>
-    <packaging.type>pom</packaging.type>
-    <webappTgtDir>${basedir}/target/src/main/webapp</webappTgtDir>
-    <node.executable>${basedir}/target/src/main/webapp/node/node</node.executable>
-    <nodeVersion>v5.7.1</nodeVersion>
-    <npmVersion>3.6.0</npmVersion>
-    <keep-ui-build-cache>false</keep-ui-build-cache>
+    <packagingType>pom</packagingType>
+
+    <webappDir>${basedir}/target/webapp</webappDir>
+    <nodeExecutable>${basedir}/target/webapp/node/node</nodeExecutable>
+    <packageManagerScript>node/yarn/dist/bin/yarn.js</packageManagerScript>
+
+    <keepUIBuildCache>false</keepUIBuildCache>
   </properties>
 
   <build>
@@ -47,7 +48,9 @@
           <excludes>
             <exclude>src/main/webapp/jsconfig.json</exclude>
             <exclude>src/main/webapp/bower.json</exclude>
+            <exclude>src/main/webapp/bower-shrinkwrap.json</exclude>
             <exclude>src/main/webapp/package.json</exclude>
+            <exclude>src/main/webapp/yarn.lock</exclude>
             <exclude>src/main/webapp/testem.json</exclude>
             <exclude>src/main/webapp/public/assets/images/**/*</exclude>
             <exclude>src/main/webapp/public/assets/images/*</exclude>
@@ -57,6 +60,7 @@
             <exclude>src/main/webapp/.ember-cli</exclude>
             <exclude>src/main/webapp/.jshintrc</exclude>
             <exclude>src/main/webapp/.watchmanconfig</exclude>
+            <exclude>src/main/webapp/WEB-INF/wro.xml</exclude>
           </excludes>
         </configuration>
       </plugin>
@@ -65,14 +69,14 @@
         <artifactId>maven-clean-plugin</artifactId>
         <version>3.0.0</version>
         <configuration>
-          <skip>${keep-ui-build-cache}</skip>
+          <skip>${keepUIBuildCache}</skip>
           <followSymLinks>false</followSymLinks>
           <filesets>
             <fileset>
-              <directory>${webappTgtDir}/bower_components</directory>
+              <directory>${webappDir}/bower_components</directory>
             </fileset>
             <fileset>
-              <directory>${webappTgtDir}/node_modules</directory>
+              <directory>${webappDir}/node_modules</directory>
             </fileset>
           </filesets>
         </configuration>
@@ -89,66 +93,79 @@
       </activation>
 
       <properties>
-        <packaging.type>war</packaging.type>
+        <packagingType>war</packagingType>
       </properties>
 
       <build>
         <plugins>
-          <!-- prepare source code -->
+          <!-- Copy files into target for build -->
           <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
+            <artifactId>maven-resources-plugin</artifactId>
             <executions>
               <execution>
-                <id>prepare-source-code</id>
-                <phase>generate-sources</phase>
+                <id>copy-resources</id>
+                <!-- here the phase you need -->
+                <phase>validate</phase>
                 <goals>
-                  <goal>run</goal>
+                  <goal>copy-resources</goal>
                 </goals>
                 <configuration>
-                  <target>
-                    <copy toDir="${basedir}/target/src/main/webapp">
-                      <fileset dir="${basedir}/src/main/webapp"/>
-                    </copy>
-
-                    <copy toDir="${basedir}/target/src/public">
-                      <fileset dir="${basedir}/public"/>
-                    </copy>
-                  </target>
+                  <outputDirectory>${webappDir}</outputDirectory>
+                  <resources>
+                    <resource>
+                      <directory>${basedir}/src/main/webapp</directory>
+                      <filtering>true</filtering>
+                      <excludes>
+                        <exclude>node_modules/**/*</exclude>
+                        <exclude>bower_components/**/*</exclude>
+                        <exclude>tmp/**/*</exclude>
+                        <exclude>dist/**/*</exclude>
+                      </excludes>
+                    </resource>
+                  </resources>
                 </configuration>
               </execution>
             </executions>
           </plugin>
 
+          <!-- Install Node, Yarn, Bower & dependencies -->
           <plugin>
             <groupId>com.github.eirslett</groupId>
             <artifactId>frontend-maven-plugin</artifactId>
-            <version>1.1</version>
+            <version>1.2</version>
             <configuration>
-              <workingDirectory>${webappTgtDir}</workingDirectory>
+              <workingDirectory>${webappDir}</workingDirectory>
             </configuration>
             <executions>
+
+              <!-- Install all dependencies -->
               <execution>
-                <phase>generate-sources</phase>
-                <id>install node and npm</id>
+                <phase>generate-resources</phase>
+                <id>install node and yarn</id>
                 <goals>
-                  <goal>install-node-and-npm</goal>
+                  <goal>install-node-and-yarn</goal>
                 </goals>
                 <configuration>
-                  <nodeVersion>${nodeVersion}</nodeVersion>
-                  <npmVersion>${npmVersion}</npmVersion>
+                  <nodeVersion>v5.12.0</nodeVersion>
+                  <yarnVersion>v0.21.3</yarnVersion>
                 </configuration>
               </execution>
               <execution>
-                <phase>generate-sources</phase>
-                <id>npm install</id>
+                <phase>generate-resources</phase>
+                <id>yarn install</id>
                 <goals>
-                  <goal>npm</goal>
+                  <goal>yarn</goal>
                 </goals>
+                <configuration>
+                  <arguments>install</arguments>
+                </configuration>
               </execution>
               <execution>
-                <phase>generate-sources</phase>
+                <phase>generate-resources</phase>
                 <id>bower install</id>
+                <configuration>
+                  <arguments>install</arguments>
+                </configuration>
                 <goals>
                   <goal>bower</goal>
                 </goals>
@@ -156,38 +173,80 @@
             </executions>
           </plugin>
 
-
-          <!-- Bower install & grunt build-->
           <plugin>
-            <artifactId>exec-maven-plugin</artifactId>
             <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
             <executions>
+
+              <!-- Build -->
               <execution>
                 <id>ember build</id>
-                <phase>generate-sources</phase>
+                <phase>generate-resources</phase>
                 <goals>
                   <goal>exec</goal>
                 </goals>
                 <configuration>
-                  <workingDirectory>${webappTgtDir}</workingDirectory>
-                  <executable>${node.executable}</executable>
+                  <workingDirectory>${webappDir}</workingDirectory>
+                  <executable>${nodeExecutable}</executable>
                   <arguments>
-                    <argument>node/node_modules/npm/bin/npm-cli</argument>
+                    <argument>${packageManagerScript}</argument>
                     <argument>run</argument>
                     <argument>build:mvn</argument>
                   </arguments>
                 </configuration>
               </execution>
+
             </executions>
           </plugin>
 
+          <!-- Asset minifier -->
+          <plugin>
+            <groupId>ro.isdc.wro4j</groupId>
+            <artifactId>wro4j-maven-plugin</artifactId>
+            <version>1.7.9</version>
+            <executions>
+              <execution>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <minimize>true</minimize>
+              <targetGroups>yarn-ui,vendor</targetGroups>
+              <destinationFolder>${basedir}/target/minified-resources/assets</destinationFolder>
+              <contextFolder>${webappDir}/dist/assets</contextFolder>
+              <wroFile>${webappDir}/WEB-INF/wro.xml</wroFile>
+            </configuration>
+          </plugin>
+
           <!-- Package into war -->
           <plugin>
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-war-plugin</artifactId>
+            <executions>
+              <execution>
+                <phase>package</phase>
+              </execution>
+            </executions>
             <configuration>
-              <webXml>${basedir}/src/main/webapp/WEB-INF/web.xml</webXml>
-              <warSourceDirectory>${webappTgtDir}/dist</warSourceDirectory>
+              <webXml>${webappDir}/WEB-INF/web.xml</webXml>
+              <warSourceDirectory>${webappDir}/dist</warSourceDirectory>
+              <webResources>
+                <resource>
+                  <filtering>false</filtering>
+                  <directory>${basedir}/target/minified-resources</directory>
+                </resource>
+              </webResources>
+            </configuration>
+          </plugin>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-jar-plugin</artifactId>
+            <configuration>
+              <skipIfEmpty>true</skipIfEmpty>
             </configuration>
           </plugin>
 
@@ -195,4 +254,5 @@
       </build>
     </profile>
   </profiles>
+
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..5b0b07d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,7 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "resolvers": [
+    "bower-shrinkwrap-resolver-ext"
+  ]
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml
new file mode 100644
index 0000000..64c925a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<groups xmlns="http://www.isdc.ro/wro">
+  <group name='vendor'>
+    <js>/vendor.js</js>
+  </group>
+  <group name='yarn-ui'>
+    <js>/yarn-ui.js</js>
+  </group>
+</groups>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json
new file mode 100644
index 0000000..b0f3aa3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json
@@ -0,0 +1,66 @@
+{
+  "https://github.com/DataTables/DataTables.git": {
+    "1.10.15": "1.10.15"
+  },
+  "https://github.com/components/ember-data.git": {
+    "2.1.0": "d8b4d3092f67afe22d9d374c40d719d557915fa3"
+  },
+  "https://github.com/components/ember.git": {
+    "2.2.0": "49e042ca89922ed96b27488c2a98add280ae7123"
+  },
+  "https://github.com/components/jqueryui.git": {
+    "1.11.4": "c34f8dbf3ba57b3784b93f26119f436c0e8288e1"
+  },
+  "https://github.com/dockyard/ember-qunit-notifications.git": {
+    "0.1.0": "a83277aa7a1c0545c66e6d133caebb9a620e71ad"
+  },
+  "https://github.com/dockyard/qunit-notifications.git": {
+    "0.1.1": "7a13f6dba5a340e1cb9e0b64c1c711e4d7edaca1"
+  },
+  "https://github.com/ember-cli/ember-cli-shims.git": {
+    "0.0.6": "dcab43b58d5698690050bb9a46ead5c8663c7da1"
+  },
+  "https://github.com/ember-cli/ember-cli-test-loader.git": {
+    "0.2.1": "3348d801089279296c38f31ae14d9c4d115ce154"
+  },
+  "https://github.com/ember-cli/ember-load-initializers.git": {
+    "0.1.7": "7bb21488563bd1bba23e903a812bf5815beddd1a"
+  },
+  "https://github.com/fgnass/spin.js.git": {
+    "2.3.2": "2.3.2"
+  },
+  "https://github.com/ivaynberg/select2.git": {
+    "4.0.0": "4.0.0"
+  },
+  "https://github.com/jquery/jquery-dist.git": {
+    "2.1.4": "7751e69b615c6eca6f783a81e292a55725af6b85"
+  },
+  "https://github.com/jquery/qunit.git": {
+    "1.19.0": "467e7e34652ad7d5883ce9c568461cf8c5e172a8"
+  },
+  "https://github.com/mbostock-bower/d3-bower.git": {
+    "3.5.17": "3.5.17"
+  },
+  "https://github.com/moment/moment-timezone.git": {
+    "0.5.0": "74a2e9378ecf4a31a168f3049f086565c8d66814"
+  },
+  "https://github.com/moment/moment.git": {
+    "2.10.6": "2.10.6",
+    "2.12.0": "d3d7488b4d60632854181cb0a9af325d57fb3d51"
+  },
+  "https://github.com/rwjblue/ember-qunit-builds.git": {
+    "0.4.16": "142c4066a5458bef9dfcb92b70152b9c01d79188"
+  },
+  "https://github.com/sreenaths/more-js.git": {
+    "0.8.2": "0.8.2"
+  },
+  "https://github.com/sreenaths/snippet-ss.git": {
+    "1.11.0": "c1abc566f4e001b7f1939b6dbdd911eadc969cf9"
+  },
+  "https://github.com/stefanpenner/loader.js.git": {
+    "3.3.0": "ac909550c9544325632542bbea97531cc60bc628"
+  },
+  "https://github.com/twbs/bootstrap.git": {
+    "3.3.6": "81df608a40bf0629a1dc08e584849bb1e43e0b7a"
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index b75a2e9..4799f92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -21,8 +21,22 @@ var Funnel = require("broccoli-funnel");
 var EmberApp = require('ember-cli/lib/broccoli/ember-app');
 
 module.exports = function(defaults) {
+  var isProd = EmberApp.env() === 'production';
   var app = new EmberApp(defaults, {
-    hinting: true
+    storeConfigInMeta: false,
+    minifyCSS: {
+      enabled: isProd
+    },
+    minifyJS: {
+      // Will be minified by wro4j-maven-plugin for performance
+      enabled: false,
+    },
+    fingerprint: {
+      enabled: false
+    },
+    sourcemaps: {
+      enabled: !isProd
+    }
   });
 
   app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
index 2964d33..2830be3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
@@ -10,7 +10,6 @@
   "scripts": {
     "build": "TMPDIR=tmp node ./node_modules/ember-cli/bin/ember build",
     "start": "TMPDIR=tmp node ./node_modules/ember-cli/bin/ember server",
-
     "build:mvn": "TMPDIR=tmp node/node ./node_modules/ember-cli/bin/ember build -prod"
   },
   "repository": "",
@@ -21,12 +20,13 @@
   "license": "Apache",
   "devDependencies": {
     "bower": "1.7.7",
+    "bower-shrinkwrap-resolver-ext": "0.1.0",
     "broccoli-asset-rev": "2.4.2",
     "broccoli-funnel": "1.0.1",
     "broccoli-merge-trees": "1.1.1",
     "ember-array-contains-helper": "1.0.2",
     "ember-bootstrap": "0.5.1",
-    "ember-cli": "^1.13.13",
+    "ember-cli": "1.13.14",
     "ember-cli-app-version": "1.0.0",
     "ember-cli-babel": "5.1.6",
     "ember-cli-content-security-policy": "0.4.0",
@@ -49,8 +49,10 @@
     "ember-lodash": "0.0.10",
     "ember-resolver": "2.0.3",
     "ember-spin-spinner": "0.2.3",
-    "ember-truth-helpers": "1.2.0",
-    "select2": "4.0.0"
+    "ember-truth-helpers": "1.3.0",
+    "loader.js": "4.2.3",
+    "select2": "4.0.0",
+    "testem": "0.9.11"
   },
   "dependencies": {
     "em-helpers": "^0.8.0",




[10/50] [abbrv] hadoop git commit: HADOOP-14464. hadoop-aws doc header warning #5 line wrapped. Contributed by John Zhuge.

Posted by xg...@apache.org.
HADOOP-14464. hadoop-aws doc header warning #5 line wrapped. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c6a7a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c6a7a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c6a7a59

Branch: refs/heads/YARN-5734
Commit: 6c6a7a59622ba7c1e4faa5534f4479de0cd84993
Parents: 31058b2
Author: John Zhuge <jz...@apache.org>
Authored: Sat May 27 23:56:49 2017 -0700
Committer: John Zhuge <jz...@apache.org>
Committed: Sun May 28 22:25:00 2017 -0700

----------------------------------------------------------------------
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md        | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c6a7a59/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index e5aa431..8c8df1b 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -109,8 +109,7 @@ Do not inadvertently share these credentials through means such as
 
 If you do any of these: change your credentials immediately!
 
-### Warning #5: The S3 client provided by Amazon EMR are not from the Apache
-Software foundation, and are only supported by Amazon.
+### Warning #5: The S3 client provided by Amazon EMR are not from the Apache Software foundation, and are only supported by Amazon.
 
 Specifically: on Amazon EMR, s3a is not supported, and amazon recommend
 a different filesystem implementation. If you are using Amazon EMR, follow




[26/50] [abbrv] hadoop git commit: HADOOP-14466. Remove useless document from TestAliyunOSSFileSystemContract.java. Contributed by Chen Liang.

Posted by xg...@apache.org.
HADOOP-14466. Remove useless document from TestAliyunOSSFileSystemContract.java. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e6e9658
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e6e9658
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e6e9658

Branch: refs/heads/YARN-5734
Commit: 6e6e96583f1ee3c8a8775480f41c3adcdd5e2c45
Parents: 9224348
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jun 1 13:08:01 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Jun 1 13:08:01 2017 +0900

----------------------------------------------------------------------
 .../hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java   | 5 -----
 1 file changed, 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e6e9658/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index 321e958..46ab339 100644
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -38,11 +38,6 @@ import static org.junit.Assume.assumeTrue;
 
 /**
  * Tests a live Aliyun OSS system.
- *
- * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest from
- * TestCase which uses the old Junit3 runner that doesn't ignore assumptions
- * properly making it impossible to skip the tests if we don't have a valid
- * bucket.
  */
 public class TestAliyunOSSFileSystemContract
     extends FileSystemContractBaseTest {




[07/50] [abbrv] hadoop git commit: HDFS-11891. DU#refresh should print the path of the directory when an exception is caught. Contributed by Chen Liang.

Posted by xg...@apache.org.
HDFS-11891. DU#refresh should print the path of the directory when an exception is caught. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd6a2172
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd6a2172
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd6a2172

Branch: refs/heads/YARN-5734
Commit: bd6a2172e0442e5f02bad9bc5f0568045f57bd32
Parents: 2cd612b
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri May 26 16:02:40 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri May 26 16:02:40 2017 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java      | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd6a2172/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
index b64a19d..6e374c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
@@ -52,7 +52,8 @@ public class DU extends CachingGetSpaceUsed {
     try {
       duShell.startRefresh();
     } catch (IOException ioe) {
-      LOG.warn("Could not get disk usage information", ioe);
+      LOG.warn("Could not get disk usage information for path {}",
+          getDirPath(), ioe);
     }
   }
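
The reworked call relies on SLF4J parameterized logging: the {} placeholder is filled with the path, and because the final argument is a Throwable it is still logged with its full stack trace. A self-contained sketch under that assumption (class and method names are illustrative):

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingSketch.class);

      void refresh(String dirPath) {
        try {
          throw new IOException("du failed");  // stands in for the real work
        } catch (IOException ioe) {
          // dirPath replaces {}; ioe's stack trace is appended to the entry.
          LOG.warn("Could not get disk usage information for path {}",
              dirPath, ioe);
        }
      }
    }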
 




[38/50] [abbrv] hadoop git commit: HDFS-11899. ASF License warnings generated intermittently in trunk. Contributed by Yiqun Lin.

Posted by xg...@apache.org.
HDFS-11899. ASF License warnings generated intermittently in trunk. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/367da9b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/367da9b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/367da9b7

Branch: refs/heads/YARN-5734
Commit: 367da9b7b8837d2ffa30a7d2716e463adf2d7007
Parents: 73ecb19
Author: Yiqun Lin <yq...@apache.org>
Authored: Sat Jun 3 22:07:24 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Sat Jun 3 22:07:24 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/balancer/TestBalancer.java | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/367da9b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 167997e..dd28914 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1022,14 +1022,14 @@ public class TestBalancer {
     if (!p.getExcludedNodes().isEmpty()) {
       args.add("-exclude");
       if (useFile) {
-        excludeHostsFile = new File ("exclude-hosts-file");
+        excludeHostsFile = GenericTestUtils.getTestDir("exclude-hosts-file");
         PrintWriter pw = new PrintWriter(excludeHostsFile);
         for (String host : p.getExcludedNodes()) {
           pw.write( host + "\n");
         }
         pw.close();
         args.add("-f");
-        args.add("exclude-hosts-file");
+        args.add(excludeHostsFile.getAbsolutePath());
       } else {
         args.add(StringUtils.join(p.getExcludedNodes(), ','));
       }
@@ -1039,14 +1039,14 @@ public class TestBalancer {
     if (!p.getIncludedNodes().isEmpty()) {
       args.add("-include");
       if (useFile) {
-        includeHostsFile = new File ("include-hosts-file");
+        includeHostsFile = GenericTestUtils.getTestDir("include-hosts-file");
         PrintWriter pw = new PrintWriter(includeHostsFile);
         for (String host : p.getIncludedNodes()) {
           pw.write( host + "\n");
         }
         pw.close();
         args.add("-f");
-        args.add("include-hosts-file");
+        args.add(includeHostsFile.getAbsolutePath());
       } else {
         args.add(StringUtils.join(p.getIncludedNodes(), ','));
       }
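
The intermittent ASF license (RAT) warnings came from the test writing its host files into the working directory, where the audit would find stray, license-less files; resolving them under the build's test data directory keeps them inside target/ and cleaned up. A standalone sketch approximating what GenericTestUtils.getTestDir does (property name and default are assumed to mirror Hadoop's test utilities):

    import java.io.File;
    import java.io.PrintWriter;

    public class TestDirSketch {
      // Approximation of GenericTestUtils.getTestDir(String): resolve under
      // the "test.build.data" property, defaulting to target/test/data.
      static File getTestDir(String name) {
        File dir = new File(System.getProperty("test.build.data",
            "target/test/data")).getAbsoluteFile();
        dir.mkdirs();
        return new File(dir, name);
      }

      public static void main(String[] args) throws Exception {
        File hosts = getTestDir("exclude-hosts-file");
        try (PrintWriter pw = new PrintWriter(hosts)) {
          pw.write("host1\n");
        }
        System.out.println(hosts.getAbsolutePath());
      }
    }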




[27/50] [abbrv] hadoop git commit: HADOOP-13921. Remove log4j classes from JobConf.

Posted by xg...@apache.org.
HADOOP-13921. Remove log4j classes from JobConf.

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5517a82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5517a82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5517a82

Branch: refs/heads/YARN-5734
Commit: f5517a82001eea2207a93d3b70d42ad8f4ddeccb
Parents: 6e6e965
Author: Sean Busbey <bu...@cloudera.com>
Authored: Thu May 4 11:16:25 2017 -0500
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Jun 1 13:35:14 2017 +0900

----------------------------------------------------------------------
 hadoop-client-modules/hadoop-client-runtime/pom.xml           | 1 -
 .../main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java | 4 ++--
 .../src/main/java/org/apache/hadoop/mapred/JobConf.java       | 7 +++----
 3 files changed, 5 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5517a82/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index dc0f005..3c8364c 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -96,7 +96,6 @@
     </dependency>
     <!-- Move log4j to optional, since it is needed for some pieces folks might not use:
          * one of the three custom log4j appenders we have
-         * JobConf (?!) (so essentially any user of MapReduce)
       -->
     <dependency>
       <groupId>log4j</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5517a82/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 5fd66ac..a43da65 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -650,12 +650,12 @@ public class MRApps extends Apps {
     if (isMap) {
       return conf.get(
           MRJobConfig.MAP_LOG_LEVEL,
-          JobConf.DEFAULT_LOG_LEVEL.toString()
+          JobConf.DEFAULT_LOG_LEVEL
       );
     } else {
       return conf.get(
           MRJobConfig.REDUCE_LOG_LEVEL,
-          JobConf.DEFAULT_LOG_LEVEL.toString()
+          JobConf.DEFAULT_LOG_LEVEL
       );
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5517a82/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index f286a96..be8fa9e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
-import org.apache.log4j.Level;
 
 /** 
  * A map/reduce job configuration.
@@ -333,7 +332,7 @@ public class JobConf extends Configuration {
   private Credentials credentials = new Credentials();
   
   /**
-   * Configuration key to set the logging {@link Level} for the map task.
+   * Configuration key to set the logging level for the map task.
    *
    * The allowed logging levels are:
    * OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -342,7 +341,7 @@ public class JobConf extends Configuration {
     JobContext.MAP_LOG_LEVEL;
   
   /**
-   * Configuration key to set the logging {@link Level} for the reduce task.
+   * Configuration key to set the logging level for the reduce task.
    *
    * The allowed logging levels are:
    * OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -353,7 +352,7 @@ public class JobConf extends Configuration {
   /**
    * Default logging level for map/reduce tasks.
    */
-  public static final Level DEFAULT_LOG_LEVEL = Level.INFO;
+  public static final String DEFAULT_LOG_LEVEL = JobContext.DEFAULT_LOG_LEVEL;
 
   /**
    * The variable is kept for M/R 1.x applications, M/R 2.x applications should

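For context: with DEFAULT_LOG_LEVEL now a plain String, reading the configured level needs no log4j types at all, and only code that genuinely configures log4j converts the string back. A small sketch (the configuration key is the real mapreduce.map.log.level; the local constant stands in for JobConf.DEFAULT_LOG_LEVEL):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.log4j.Level;

    public class LogLevelSketch {
      static final String DEFAULT_LOG_LEVEL = "INFO";

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // No log4j types involved in reading the level:
        String level = conf.get("mapreduce.map.log.level", DEFAULT_LOG_LEVEL);
        // Conversion happens only where log4j is actually configured;
        // toLevel falls back to the supplied default for unknown names.
        Level parsed = Level.toLevel(level, Level.INFO);
        System.out.println(parsed);
      }
    }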



[24/50] [abbrv] hadoop git commit: YARN-6246. Identifying starved apps does not need the scheduler writelock (Contributed by Karthik Kambatla via Daniel Templeton)

Posted by xg...@apache.org.
YARN-6246. Identifying starved apps does not need the scheduler writelock
(Contributed by Karthik Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5b71e41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5b71e41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5b71e41

Branch: refs/heads/YARN-5734
Commit: d5b71e4175c13679d451710be150fc461a661263
Parents: 4369690
Author: Daniel Templeton <te...@apache.org>
Authored: Wed May 31 15:48:04 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Wed May 31 15:48:04 2017 -0700

----------------------------------------------------------------------
 .../scheduler/fair/FSLeafQueue.java             |  9 +++----
 .../scheduler/fair/FSParentQueue.java           |  4 +--
 .../resourcemanager/scheduler/fair/FSQueue.java | 19 +++++++++-----
 .../scheduler/fair/FairScheduler.java           | 27 ++++++++++++++------
 4 files changed, 38 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5b71e41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 10f1e28..1de0e30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -198,13 +198,10 @@ public class FSLeafQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
     readLock.lock();
     try {
       policy.computeShares(runnableApps, getFairShare());
-      if (checkStarvation) {
-        updateStarvedApps();
-      }
     } finally {
       readLock.unlock();
     }
@@ -283,8 +280,10 @@ public class FSLeafQueue extends FSQueue {
    * If this queue is starving due to fairshare, there must be at least
    * one application that is starved. And, even if the queue is not
    * starved due to fairshare, there might still be starved applications.
+   *
+   * Caller does not need read/write lock on the leaf queue.
    */
-  private void updateStarvedApps() {
+  void updateStarvedApps() {
     // Fetch apps with pending demand
     TreeSet<FSAppAttempt> appsWithDemand = fetchAppsWithDemand(false);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5b71e41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index b062c58..5b4e4dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -79,13 +79,13 @@ public class FSParentQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
     readLock.lock();
     try {
       policy.computeShares(childQueues, getFairShare());
       for (FSQueue childQueue : childQueues) {
         childQueue.getMetrics().setFairShare(childQueue.getFairShare());
-        childQueue.updateInternal(checkStarvation);
+        childQueue.updateInternal();
       }
     } finally {
       readLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5b71e41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index e131140..12b1b83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -326,16 +326,23 @@ public abstract class FSQueue implements Queue, Schedulable {
 
   /**
    * Recomputes the shares for all child queues and applications based on this
-   * queue's current share, and checks for starvation.
+   * queue's current share.
    *
-   * @param checkStarvation whether to check for fairshare or minshare
-   *                        starvation on update
+   * To be called holding the scheduler writelock.
    */
-  abstract void updateInternal(boolean checkStarvation);
+  abstract void updateInternal();
 
-  public void update(Resource fairShare, boolean checkStarvation) {
+  /**
+   * Set the queue's fairshare and update the demand/fairshare of child
+   * queues/applications.
+   *
+   * To be called holding the scheduler writelock.
+   *
+   * @param fairShare
+   */
+  public void update(Resource fairShare) {
     setFairShare(fairShare);
-    updateInternal(checkStarvation);
+    updateInternal();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5b71e41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index d1a237a..d779159 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -366,20 +366,31 @@ public class FairScheduler extends
    */
   @VisibleForTesting
   public void update() {
-    try {
-      writeLock.lock();
-
-      FSQueue rootQueue = queueMgr.getRootQueue();
+    FSQueue rootQueue = queueMgr.getRootQueue();
 
+    // Update demands and fairshares
+    writeLock.lock();
+    try {
       // Recursively update demands for all queues
       rootQueue.updateDemand();
-
-      Resource clusterResource = getClusterResource();
-      rootQueue.update(clusterResource, shouldAttemptPreemption());
+      rootQueue.update(getClusterResource());
 
       // Update metrics
       updateRootQueueMetrics();
+    } finally {
+      writeLock.unlock();
+    }
+
+    readLock.lock();
+    try {
+      // Update starvation stats and identify starved applications
+      if (shouldAttemptPreemption()) {
+        for (FSLeafQueue queue : queueMgr.getLeafQueues()) {
+          queue.updateStarvedApps();
+        }
+      }
 
+      // Log debug information
       if (LOG.isDebugEnabled()) {
         if (--updatesToSkipForDebug < 0) {
           updatesToSkipForDebug = UPDATE_DEBUG_FREQUENCY;
@@ -387,7 +398,7 @@ public class FairScheduler extends
         }
       }
     } finally {
-      writeLock.unlock();
+      readLock.unlock();
     }
   }
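
The restructuring rests on a read/write lock split: recomputing demands and fair shares mutates scheduler state and keeps the write lock, while identifying starved applications only reads that state and can run under the shared read lock, concurrently with other readers. A generic sketch of the pattern (illustrative names and logic, not the FairScheduler internals):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class LockSplitSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final List<Long> shares = new ArrayList<>();

      // Phase 1: mutation happens under the exclusive write lock.
      void updateShares(long clusterResource, int queues) {
        lock.writeLock().lock();
        try {
          shares.clear();
          for (int i = 0; i < queues; i++) {
            shares.add(clusterResource / queues);
          }
        } finally {
          lock.writeLock().unlock();
        }
      }

      // Phase 2: a read-only scan runs under the shared read lock, so it
      // blocks writers but not other readers.
      long countStarved(long demand) {
        lock.readLock().lock();
        try {
          long starved = 0;
          for (long share : shares) {
            if (share < demand) {
              starved++;
            }
          }
          return starved;
        } finally {
          lock.readLock().unlock();
        }
      }
    }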
 




[20/50] [abbrv] hadoop git commit: HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

Posted by xg...@apache.org.
HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13de636b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13de636b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13de636b

Branch: refs/heads/YARN-5734
Commit: 13de636b4079b077890ad10389ff350dcf8086a2
Parents: 547f18c
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed May 31 23:09:08 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed May 31 23:09:08 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java  | 4 ++--
 .../src/main/java/org/apache/hadoop/lib/server/Server.java       | 2 +-
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../src/test/java/org/apache/hadoop/lib/lang/TestXException.java | 2 +-
 .../src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java      | 2 +-
 .../main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java | 2 +-
 .../hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java   | 2 +-
 .../hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java   | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java    | 2 +-
 .../hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java       | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/Content.java     | 2 +-
 .../hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java   | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java    | 2 +-
 .../hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java       | 2 +-
 .../hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java    | 2 +-
 .../hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java     | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/Diff.java          | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java   | 2 +-
 .../hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java   | 2 +-
 23 files changed, 24 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 055a57e..5922958 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -139,7 +139,7 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
 
-  public static enum FILE_TYPE {
+  public enum FILE_TYPE {
     FILE, DIRECTORY, SYMLINK;
 
     public static FILE_TYPE getType(FileStatus fileStatus) {
@@ -210,7 +210,7 @@ public class HttpFSFileSystem extends FileSystem
   private static final String HTTP_DELETE = "DELETE";
 
   @InterfaceAudience.Private
-  public static enum Operation {
+  public enum Operation {
     OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
     GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
index 82be027..57f651a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
@@ -101,7 +101,7 @@ public class Server {
    * Enumeration that defines the server status.
    */
   @InterfaceAudience.Private
-  public static enum Status {
+  public enum Status {
     UNDEF(false, false),
     BOOTING(false, true),
     HALTED(true, true),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index e3759ce..22bfa5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -31,7 +31,7 @@ public class ServerException extends XException {
    * Error codes use by the {@link Server} class.
    */
   @InterfaceAudience.Private
-  public static enum ERROR implements XException.ERROR {
+  public enum ERROR implements XException.ERROR {
     S01("Dir [{0}] does not exist"),
     S02("[{0}] is not a directory"),
     S03("Could not load file from classpath [{0}], {1}"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
index 59d02e3..2869d47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
@@ -27,7 +27,7 @@ import org.junit.Test;
 
 public class TestXException extends HTestCase {
 
-  public static enum TestERROR implements XException.ERROR {
+  public enum TestERROR implements XException.ERROR {
     TC;
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
index 92719db..553ce9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
@@ -100,7 +100,7 @@ public class TestParam {
     test(param, "L", "a long", 1L, 2L, "x", null);
   }
 
-  public static enum ENUM {
+  public enum ENUM {
     FOO, BAR
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index 9371a72..2617019 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -72,7 +72,7 @@ class OpenFileCtx {
   // Pending writes water mark for dump, 1MB
   private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
 
-  static enum COMMIT_STATUS {
+  enum COMMIT_STATUS {
     COMMIT_FINISHED,
     COMMIT_WAIT,
     COMMIT_INACTIVE_CTX,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
index f4c32f6..f89679f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
@@ -44,7 +44,7 @@ class WriteCtx {
    * wait for prerequisite writes. NO_DUMP: sequential write, no need to dump
    * since it will be written to HDFS soon. DUMPED: already dumped to a file.
    */
-  public static enum DataState {
+  public enum DataState {
     ALLOW_DUMP,
     NO_DUMP,
     DUMPED

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index 1750790..671971d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -74,7 +74,7 @@ public class LayoutVersion {
    * </li>
    * </ul>
    */
-  public static enum Feature implements LayoutFeature {
+  public enum Feature implements LayoutFeature {
     NAMESPACE_QUOTA(-16, "Support for namespace quotas"),
     FILE_ACCESS_TIME(-17, "Support for access time on files"),
     DISKSPACE_QUOTA(-18, "Support for disk space quotas"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 8a097a5..0442588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -46,7 +46,7 @@ import com.google.common.annotations.VisibleForTesting;
 public class CorruptReplicasMap{
 
   /** The corruption reason code */
-  public static enum Reason {
+  public enum Reason {
     NONE,                // not specified.
     ANY,                 // wildcard reason
     GENSTAMP_MISMATCH,   // mismatch in generation stamps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 8af86d3..b1ccea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -398,7 +398,7 @@ public class DatanodeStorageInfo {
     this.remaining = remaining;
   }
 
-  static enum AddBlockResult {
+  enum AddBlockResult {
     ADDED, REPLACED, ALREADY_EXIST
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index c1cda57..fcf7d5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -96,7 +96,7 @@ class BPServiceActor implements Runnable {
   Thread bpThread;
   DatanodeProtocolClientSideTranslatorPB bpNamenode;
 
-  static enum RunningState {
+  enum RunningState {
     CONNECTING, INIT_FAILED, RUNNING, EXITED, FAILED;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index c5462a9..a0e646d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -1185,7 +1185,7 @@ class BlockReceiver implements Closeable {
     return handler;
   }
 
-  private static enum PacketResponderType {
+  private enum PacketResponderType {
     NON_PIPELINE, LAST_IN_PIPELINE, HAS_DOWNSTREAM_IN_PIPELINE
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
index 609a740..abe7b74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
@@ -61,7 +61,7 @@ public class DataNodeLayoutVersion {
    * </li>
    * </ul>
    */
-  public static enum Feature implements LayoutFeature {
+  public enum Feature implements LayoutFeature {
     FIRST_LAYOUT(-55, -53, "First datanode layout", false),
     BLOCKID_BASED_LAYOUT(-56,
         "The block ID of a finalized block uniquely determines its position " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index 450ddee..0737824 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -55,7 +55,7 @@ public class BackupImage extends FSImage {
    *   stopApplyingOnNextRoll is true.
    */
   volatile BNState bnState;
-  static enum BNState {
+  enum BNState {
     /**
      * Edits from the NN should be dropped. On the next log roll,
      * transition to JOURNAL_ONLY state

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
index c1caae5..cb5a2f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
@@ -63,7 +63,7 @@ public enum Content {
   public static class CountsMap
       extends EnumCounters.Map<CountsMap.Key, Content, Counts> {
     /** The key type of the map. */
-    public static enum Key { CURRENT, SNAPSHOT }
+    public enum Key { CURRENT, SNAPSHOT }
 
     CountsMap() {
       super(FACTORY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index 8ff15a8..fe58577 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -33,7 +33,7 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 public abstract class INodeWithAdditionalFields extends INode
     implements LinkedElement {
-  static enum PermissionStatusFormat {
+  enum PermissionStatusFormat {
     MODE(null, 16),
     GROUP(MODE.BITS, 25),
     USER(GROUP.BITS, 23);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 32d268a..2bc3642 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -209,7 +209,7 @@ public class NameNode extends ReconfigurableBase implements
   /**
    * Categories of operations supported by the namenode.
    */
-  public static enum OperationCategory {
+  public enum OperationCategory {
     /** Operations that are state agnostic */
     UNCHECKED,
     /** Read operation that does not change the namespace state */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index 2943fc2..182cc08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -77,7 +77,7 @@ public class NameNodeLayoutVersion {
    * support downgrade.
    * </ul>
    */
-  public static enum Feature implements LayoutFeature {
+  public enum Feature implements LayoutFeature {
     ROLLING_UPGRADE(-55, -53, -55, "Support rolling upgrade", false),
     EDITLOG_LENGTH(-56, -56, "Add length field to every edit log op"),
     XATTRS(-57, -57, "Extended attributes"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
index 740036a..355f09f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
@@ -28,7 +28,7 @@ public class ReceivedDeletedBlockInfo {
   BlockStatus status;
   String delHints;
 
-  public static enum BlockStatus {
+  public enum BlockStatus {
     RECEIVING_BLOCK(1),
     RECEIVED_BLOCK(2),
     DELETED_BLOCK(3);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
index 1882e58..a6901f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
@@ -61,7 +61,7 @@ class OfflineEditsXmlLoader
   private long nextTxId;
   private final OpInstanceCache opCache = new OpInstanceCache();
   
-  static enum ParseState {
+  enum ParseState {
     EXPECT_EDITS_TAG,
     EXPECT_VERSION,
     EXPECT_RECORD,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
index 54e7103..26d1fb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
@@ -73,7 +73,7 @@ import com.google.common.base.Preconditions;
  * @param <E> The element type, which must implement {@link Element} interface.
  */
 public class Diff<K, E extends Diff.Element<K>> {
-  public static enum ListType {
+  public enum ListType {
     CREATED, DELETED
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 9dff529..e191414 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -1123,7 +1123,7 @@ public class TestFileCreation {
     doCreateTest(CreationMethod.PATH_FROM_URI);
   }
   
-  private static enum CreationMethod {
+  private enum CreationMethod {
     DIRECT_NN_RPC,
     PATH_FROM_URI,
     PATH_FROM_STRING

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13de636b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 079038c..38c2b2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -82,7 +82,7 @@ public class TestFailureToReadEdits {
   private NameNode nn1;
   private FileSystem fs;
   
-  private static enum TestType {
+  private enum TestType {
     SHARED_DIR_HA,
     QJM_HA;
   };
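
Every hunk in this commit is the same mechanical cleanup: per the Java Language Specification (JLS 8.9), a nested enum type is implicitly static, so writing the modifier out is redundant. A small self-contained illustration, with made-up names:

    public class Outer {
      // Legal but redundant: a nested enum is implicitly static (JLS 8.9).
      public static enum WithModifier { A, B }

      // Equivalent form after this cleanup.
      public enum WithoutModifier { A, B }

      public static void main(String[] args) {
        // Both behave identically and need no enclosing Outer instance.
        System.out.println(WithModifier.A + " / " + WithoutModifier.B);
      }
    }

The two declarations are equivalent, so the change is purely stylistic and safe across all 23 files touched.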



[36/50] [abbrv] hadoop git commit: HADOOP-14436. Remove the redundant colon in ViewFs.md. Contributed by maobaolong.

Posted by xg...@apache.org.
HADOOP-14436. Remove the redundant colon in ViewFs.md. Contributed by maobaolong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/056cc728
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/056cc728
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/056cc728

Branch: refs/heads/YARN-5734
Commit: 056cc72885471d6952ff182670e4b4a38421603d
Parents: 8d9084e
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Fri Jun 2 22:39:10 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Fri Jun 2 22:39:10 2017 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/056cc728/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index 6e8ce67..3810e28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -118,7 +118,7 @@ Hence on Cluster X, where the `core-site.xml` is set to make the default fs to u
 
     * It is an URI for referring a pathname on another cluster such as Cluster Y. In particular, the command for copying files from cluster Y to Cluster Z looks like:
 
-            distcp viewfs://clusterY:/pathSrc viewfs://clusterZ/pathDest
+            distcp viewfs://clusterY/pathSrc viewfs://clusterZ/pathDest
 
 4.  `viewfs://clusterX-webhdfs/foo/bar`
 



[45/50] [abbrv] hadoop git commit: HDFS-11928. Segment overflow in FileDistributionCalculator. Contributed by LiXin Ge.

Posted by xg...@apache.org.
HDFS-11928. Segment overflow in FileDistributionCalculator. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83556098
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83556098
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83556098

Branch: refs/heads/YARN-5734
Commit: 835560983ed74a2798d2fb3a2bb0bb46a4ee7c55
Parents: e4e203e
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Jun 5 13:21:22 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Jun 5 13:21:22 2017 -0700

----------------------------------------------------------------------
 .../tools/offlineImageViewer/FileDistributionCalculator.java     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83556098/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index 71fb822..25a7bbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -160,8 +160,8 @@ final class FileDistributionCalculator {
               + StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * steps))
               + ", "
               + StringUtils.byteDesc((long)
-                  (i == distribution.length - 1 ? maxFileSize : i * steps))
-                  + "]\t" + distribution[i]);
+                  (i == distribution.length - 1 ? maxFileSize :
+                      (long) i * steps)) + "]\t" + distribution[i]);
         } else {
           out.print(((long) i * steps) + "\t" + distribution[i]);
         }
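
The fix addresses a classic Java pitfall: in "(long) (i * steps)" the multiplication is still evaluated in 32-bit int arithmetic and can wrap before the cast widens the already-overflowed result, whereas "(long) i * steps" widens one operand first so the multiply happens in 64-bit. A minimal demonstration with illustrative values:

    public class OverflowDemo {
      public static void main(String[] args) {
        int i = 3;
        int steps = 1_000_000_000; // e.g. a 1 GB histogram bucket width

        // Multiply wraps in int, and only then is the result widened.
        long broken = (long) (i * steps);
        // Widening one operand first keeps the multiply in 64-bit.
        long fixed = (long) i * steps;

        System.out.println(broken); // -1294967296 (wrapped)
        System.out.println(fixed);  // 3000000000
      }
    }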



[14/50] [abbrv] hadoop git commit: YARN-6635. Refactor yarn-app pages in new YARN UI. Contributed by Akhil PB.

Posted by xg...@apache.org.
YARN-6635. Refactor yarn-app pages in new YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af03c333
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af03c333
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af03c333

Branch: refs/heads/YARN-5734
Commit: af03c3334610bc4d8788e7c7b21d5aa6b946fe26
Parents: 07e60f8
Author: Sunil G <su...@apache.org>
Authored: Tue May 30 13:52:40 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue May 30 13:52:40 2017 +0530

----------------------------------------------------------------------
 .../webapp/app/controllers/app-table-columns.js |   4 +-
 .../webapp/app/controllers/yarn-app-attempt.js  |   8 +-
 .../webapp/app/controllers/yarn-app-attempts.js |  57 ------
 .../src/main/webapp/app/controllers/yarn-app.js |  56 +++---
 .../webapp/app/controllers/yarn-app/attempts.js |  24 +++
 .../webapp/app/controllers/yarn-app/charts.js   |  28 +++
 .../webapp/app/controllers/yarn-app/info.js     |  32 ++++
 .../app/controllers/yarn-apps/services.js       |  31 ----
 .../webapp/app/controllers/yarn-flowrun/info.js |   2 +-
 .../src/main/webapp/app/router.js               |   8 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  35 ----
 .../src/main/webapp/app/routes/yarn-app.js      |  35 +---
 .../main/webapp/app/routes/yarn-app/attempts.js |  37 ++++
 .../main/webapp/app/routes/yarn-app/charts.js   |  53 ++++++
 .../src/main/webapp/app/routes/yarn-app/info.js |  37 ++++
 .../webapp/app/routes/yarn-apps/services.js     |  33 ----
 .../main/webapp/app/templates/application.hbs   |   2 +-
 .../app/templates/components/app-table.hbs      |   6 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  58 ------
 .../src/main/webapp/app/templates/yarn-app.hbs  | 185 +------------------
 .../webapp/app/templates/yarn-app/attempts.hbs  |  29 +++
 .../webapp/app/templates/yarn-app/charts.hbs    |  43 +++++
 .../main/webapp/app/templates/yarn-app/info.hbs | 167 +++++++++++++++++
 .../webapp/app/templates/yarn-app/loading.hbs   |  23 +++
 .../src/main/webapp/app/templates/yarn-apps.hbs |   5 +-
 .../webapp/app/templates/yarn-apps/services.hbs |  25 ---
 .../main/webapp/app/templates/yarn-services.hbs |   3 +-
 .../unit/controllers/yarn-app-attempts-test.js  |  30 ---
 .../unit/controllers/yarn-app/attempts-test.js  |  30 +++
 .../unit/controllers/yarn-app/charts-test.js    |  30 +++
 .../unit/controllers/yarn-app/info-test.js      |  30 +++
 .../unit/controllers/yarn-apps/services-test.js |  30 ---
 .../tests/unit/routes/yarn-app-attempts-test.js |  29 ---
 .../tests/unit/routes/yarn-app/attempts-test.js |  29 +++
 .../tests/unit/routes/yarn-app/charts-test.js   |  29 +++
 .../tests/unit/routes/yarn-app/info-test.js     |  29 +++
 36 files changed, 714 insertions(+), 578 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index 704abfb..8a34f1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -38,7 +38,7 @@ export default Ember.Controller.extend({
           getCellContent: function(row) {
             return {
               displayText: row.id,
-              href: `#/yarn-app/${row.id}`
+              href: `#/yarn-app/${row.id}/info`
             };
           }
       }, {
@@ -112,7 +112,7 @@ export default Ember.Controller.extend({
       getCellContent: function(row) {
         return {
           displayText: row.get('appName'),
-          href: `#/yarn-app/${row.id}?service=${row.get('appName')}`
+          href: `#/yarn-app/${row.id}/info?service=${row.get('appName')}`
         };
       }
     }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index fbe6fa9..1121a84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -34,10 +34,10 @@ export default Ember.Controller.extend({
       routeName: 'yarn-apps.apps'
     }, {
       text: `App [${appId}]`,
-      href: `#/yarn-app/${appId}`
+      href: `#/yarn-app/${appId}/info`
     }, {
       text: "Attempts",
-      href: `#/yarn-app-attempts/${appId}`
+      href: `#/yarn-app/${appId}/attempts`
     }, {
       text: `Attempt [${attemptId}]`
     }];
@@ -50,10 +50,10 @@ export default Ember.Controller.extend({
         routeName: 'yarn-services'
       }, {
         text: `${serviceName} [${appId}]`,
-        href: `#/yarn-app/${appId}?service=${serviceName}`
+        href: `#/yarn-app/${appId}/info?service=${serviceName}`
       }, {
         text: "Attempts",
-        href: `#/yarn-app-attempts/${appId}?service=${serviceName}`
+        href: `#/yarn-app/${appId}/attempts?service=${serviceName}`
       }, {
         text: `Attempt [${attemptId}]`
       }];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
deleted file mode 100644
index 77e531e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import Ember from 'ember';
-
-export default Ember.Controller.extend({
-  queryParams: ["service"],
-  service: undefined,
-
-  breadcrumbs: Ember.computed("model.appId", function () {
-    var appId = this.get("model.appId");
-    var serviceName = this.get('service');
-    var breadcrumbs = [{
-      text: "Home",
-      routeName: 'application'
-    },{
-      text: "Applications",
-      routeName: 'yarn-apps.apps'
-    }, {
-      text: `App [${appId}]`,
-      href: `#/yarn-app/${appId}`
-    }, {
-      text: "Attempts",
-    }];
-    if (serviceName) {
-      breadcrumbs = [{
-        text: "Home",
-        routeName: 'application'
-      }, {
-        text: "Services",
-        routeName: 'yarn-services'
-      }, {
-        text: `${serviceName} [${appId}]`,
-        href: `#/yarn-app/${appId}?service=${serviceName}`
-      }, {
-        text: "Attempts"
-      }];
-    }
-    return breadcrumbs;
-  })
-
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index e7d65cd..c40697f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -19,42 +19,44 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["service"],
-  service: undefined,
+  appId: '',
+  serviceName: undefined,
 
-  breadcrumbs: Ember.computed("model.app.id", function () {
-    var appId = this.get("model.app.id");
-    var serviceName = this.get('service');
+  breadcrumbs: [{
+    text: "Home",
+    routeName: 'application'
+  }, {
+    text: "Applications",
+    routeName: 'yarn-apps.apps'
+  }, {
+    text: 'App'
+  }],
+
+  updateBreadcrumbs(appId, serviceName, tailCrumbs) {
     var breadcrumbs = [{
       text: "Home",
       routeName: 'application'
-    },{
-      text: "Applications",
-      routeName: 'yarn-apps.apps'
-    }, {
-      text: `App [${appId}]`,
-      href: `#/yarn-app/${appId}`
     }];
-    if (serviceName) {
-      breadcrumbs = [{
-        text: "Home",
-        routeName: 'application'
-      }, {
+    if (appId && serviceName) {
+      breadcrumbs.push({
         text: "Services",
         routeName: 'yarn-services'
       }, {
         text: `${serviceName} [${appId}]`,
-        href: `#/yarn-app/${appId}?service=${serviceName}`
-      }];
+        href: `#/yarn-app/${appId}/info?service=${serviceName}`
+      });
+    } else {
+      breadcrumbs.push({
+        text: "Applications",
+        routeName: 'yarn-apps.apps'
+      }, {
+        text: `App [${appId}]`,
+        href: `#/yarn-app/${appId}/info`
+      });
     }
-    return breadcrumbs;
-  }),
-
-  amHostHttpAddressFormatted: Ember.computed('model.app.amHostHttpAddress', function() {
-    var amHostAddress = this.get('model.app.amHostHttpAddress');
-    if (amHostAddress && amHostAddress.indexOf('://') < 0) {
-      amHostAddress = 'http://' + amHostAddress;
+    if (tailCrumbs) {
+      breadcrumbs.pushObjects(tailCrumbs);
     }
-    return amHostAddress;
-  })
+    this.set('breadcrumbs', breadcrumbs);
+  }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/attempts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/attempts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/attempts.js
new file mode 100644
index 0000000..a6cba9e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/attempts.js
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+  queryParams: ["service"],
+  service: undefined
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/charts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/charts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/charts.js
new file mode 100644
index 0000000..1078b14
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/charts.js
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+  queryParams: ["service"],
+  service: undefined,
+
+  isRunningApp: Ember.computed('model.app.state', function() {
+    return this.get('model.app.state') === "RUNNING";
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
new file mode 100644
index 0000000..f9652f9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+  queryParams: ["service"],
+  service: undefined,
+
+  amHostHttpAddressFormatted: Ember.computed('model.app.amHostHttpAddress', function() {
+    var amHostAddress = this.get('model.app.amHostHttpAddress');
+    if (amHostAddress && amHostAddress.indexOf('://') < 0) {
+      amHostAddress = 'http://' + amHostAddress;
+    }
+    return amHostAddress;
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps/services.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps/services.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps/services.js
deleted file mode 100644
index fffaf17..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps/services.js
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import Ember from 'ember';
-import TableDefinition from 'em-table/utils/table-definition';
-import AppTableController from '../app-table-columns';
-
-export default AppTableController.extend({
-  queryParams: ['searchText', 'sortColumnId', 'sortOrder', 'pageNum', 'rowCount'],
-  tableDefinition: TableDefinition.create(),
-  searchText: Ember.computed.alias('tableDefinition.searchText'),
-  sortColumnId: Ember.computed.alias('tableDefinition.sortColumnId'),
-  sortOrder: Ember.computed.alias('tableDefinition.sortOrder'),
-  pageNum: Ember.computed.alias('tableDefinition.pageNum'),
-  rowCount: Ember.computed.alias('tableDefinition.rowCount')
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-flowrun/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-flowrun/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-flowrun/info.js
index df2f87e..63e17dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-flowrun/info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-flowrun/info.js
@@ -32,7 +32,7 @@ function createColumn() {
     minWidth: "300px",
     getCellContent: function (row) {
       return {
-        routeName: 'yarn-app',
+        routeName: 'yarn-app.info',
         id: row.get('appId'),
         displayText: row.get('appId')
       };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
index 00d6d3e..9013142 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
@@ -26,7 +26,6 @@ var Router = Ember.Router.extend({
 Router.map(function() {
   this.route('yarn-apps', function () {
     this.route('apps');
-    this.route('services');
   });
   this.route('yarn-services');
   this.route('yarn-nodes', function(){
@@ -50,11 +49,14 @@ Router.map(function() {
       '/yarn-container-log/:node_id/:node_addr/:container_id/:filename' });
 
   this.route('cluster-overview');
-  this.route('yarn-app', { path: '/yarn-app/:app_id' });
+  this.route('yarn-app', function() {
+    this.route('info', {path: '/:app_id/info'});
+    this.route('attempts', {path: '/:app_id/attempts'});
+    this.route('charts', {path: '/:app_id/charts'});
+  });
   this.route('yarn-app-attempt', { path: '/yarn-app-attempt/:app_attempt_id'});
   this.route('error');
   this.route('notfound', { path: '*:' });
-  this.route('yarn-app-attempts', { path: '/yarn-app-attempts/:app_id' });
   this.route('yarn-queues', { path: '/yarn-queues/:queue_name' });
 
   this.route('yarn-flow-activity');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js
deleted file mode 100644
index 233bfc8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import Ember from 'ember';
-import AbstractRoute from './abstract';
-import AppAttemptMixin from 'yarn-ui/mixins/app-attempt';
-
-export default AbstractRoute.extend(AppAttemptMixin, {
-  model(param) {
-    return Ember.RSVP.hash({
-      appId: param.app_id,
-      attempts: this.fetchAttemptListFromRMorATS(param.app_id, this.store)
-    });
-  },
-
-  unloadAll() {
-    this.store.unloadAll('yarn-app-attempt');
-    this.store.unloadAll('yarn-timeline-appattempt');
-  }
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
index 98b0cc8..58e3fe3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
@@ -16,35 +16,14 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
 import AbstractRoute from './abstract';
-import AppAttemptMixin from 'yarn-ui/mixins/app-attempt';
 
-export default AbstractRoute.extend(AppAttemptMixin, {
-  model(param) {
-    return Ember.RSVP.hash({
-      app: this.fetchAppInfoFromRMorATS(param.app_id, this.store),
-
-      rmContainers: this.store.find('yarn-app', param.app_id).then(function() {
-        return this.store.query('yarn-app-attempt', {appId: param.app_id}).then(function (attempts) {
-          if (attempts && attempts.get('firstObject')) {
-            var appAttemptId = attempts.get('firstObject').get('appAttemptId');
-            return this.store.query('yarn-container', {
-              app_attempt_id: appAttemptId
-            });
-          }
-        }.bind(this));
-      }.bind(this)),
-
-      nodes: this.store.findAll('yarn-rm-node', {reload: true}),
-    });
-  },
-
-  unloadAll() {
-    this.store.unloadAll('yarn-app');
-    this.store.unloadAll('yarn-app-attempt');
-    this.store.unloadAll('yarn-container');
-    this.store.unloadAll('yarn-rm-node');
-    this.store.unloadAll('yarn-app-timeline');
+export default AbstractRoute.extend({
+  actions: {
+    updateBreadcrumbs(appId, serviceName, tailCrumbs) {
+      var controller = this.controllerFor('yarn-app');
+      controller.setProperties({appId: appId, serviceName: serviceName});
+      controller.updateBreadcrumbs(appId, serviceName, tailCrumbs);
+    }
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/attempts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/attempts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/attempts.js
new file mode 100644
index 0000000..fcea111
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/attempts.js
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+import AppAttemptMixin from 'yarn-ui/mixins/app-attempt';
+
+export default AbstractRoute.extend(AppAttemptMixin, {
+  model(param, transition) {
+    transition.send('updateBreadcrumbs', param.app_id, param.service, [{text: 'Attempts'}]);
+    return Ember.RSVP.hash({
+      appId: param.app_id,
+      serviceName: param.service,
+      attempts: this.fetchAttemptListFromRMorATS(param.app_id, this.store)
+    });
+  },
+
+  unloadAll() {
+    this.store.unloadAll('yarn-app-attempt');
+    this.store.unloadAll('yarn-timeline-appattempt');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/charts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/charts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/charts.js
new file mode 100644
index 0000000..1b687db
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/charts.js
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+
+export default AbstractRoute.extend({
+  model(param, transition) {
+    transition.send('updateBreadcrumbs', param.app_id, param.service, [{text: "Charts"}]);
+    return Ember.RSVP.hash({
+      appId: param.app_id,
+      serviceName: param.service,
+
+      app: this.store.find('yarn-app', param.app_id),
+
+      rmContainers: this.store.find('yarn-app', param.app_id).then(function() {
+        return this.store.query('yarn-app-attempt', {appId: param.app_id}).then(function (attempts) {
+          if (attempts && attempts.get('firstObject')) {
+            var appAttemptId = attempts.get('firstObject').get('appAttemptId');
+            return this.store.query('yarn-container', {
+              app_attempt_id: appAttemptId,
+              is_rm: true
+            });
+          }
+        }.bind(this));
+      }.bind(this)),
+
+      nodes: this.store.findAll('yarn-rm-node')
+    });
+  },
+
+  unloadAll() {
+    this.store.unloadAll('yarn-app');
+    this.store.unloadAll('yarn-app-attempt');
+    this.store.unloadAll('yarn-container');
+    this.store.unloadAll('yarn-rm-node');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js
new file mode 100644
index 0000000..4a4b19e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+import AppAttemptMixin from 'yarn-ui/mixins/app-attempt';
+
+export default AbstractRoute.extend(AppAttemptMixin, {
+  model(param, transition) {
+    transition.send('updateBreadcrumbs', param.app_id, param.service);
+    return Ember.RSVP.hash({
+      appId: param.app_id,
+      serviceName: param.service,
+      app: this.fetchAppInfoFromRMorATS(param.app_id, this.store)
+    });
+  },
+
+  unloadAll() {
+    this.store.unloadAll('yarn-app');
+    this.store.unloadAll('yarn-app-timeline');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
deleted file mode 100644
index 34ad1ad..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-apps/services.js
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  model() {
-      return Ember.RSVP.hash({
-        apps: this.store.query('yarn-app', {
-          applicationTypes: "org-apache-slider"
-      }),
-    });
-  },
-
-  unloadAll() {
-    this.store.unloadAll('yarn-app');
-  }
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
index c3154b4..e988e0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
@@ -46,7 +46,7 @@
               <span class="sr-only">(current)</span>
             {{/link-to}}
           {{/link-to}}
-          {{#link-to 'yarn-apps.apps' tagName="li" current-when="yarn-apps.apps yarn-apps.services"}}
+          {{#link-to 'yarn-apps.apps' tagName="li" current-when="yarn-apps.apps"}}
             {{#link-to 'yarn-apps.apps' class="navigation-link"}}Applications
               <span class="sr-only">(current)</span>
             {{/link-to}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-table.hbs
index a036a0c..3bad063 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-table.hbs
@@ -38,7 +38,7 @@
     {{#if arr}}
       {{#each arr as |app|}}
         <tr>
-          <td><a href="#/yarn-app/{{app.id}}">{{app.id}}</a></td>
+          <td><a href="#/yarn-app/{{app.id}}/info">{{app.id}}</a></td>
           <td>{{app.applicationType}}</td>
           <td>{{app.appName}}</td>
           <td>{{app.user}}</td>
@@ -61,7 +61,7 @@
       {{/each}}
     {{else}}
       <tr>
-          <td><a href="#/yarn-app/{{app.id}}">{{app.id}}</a></td>
+          <td><a href="#/yarn-app/{{app.id}}/info">{{app.id}}</a></td>
           <td>{{app.applicationType}}</td>
           <td>{{app.appName}}</td>
           <td>{{app.user}}</td>
@@ -83,4 +83,4 @@
       </tr>
     {{/if}}
   </tbody>
-</table>
\ No newline at end of file
+</table>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app-attempts.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app-attempts.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app-attempts.hbs
deleted file mode 100644
index 283d78a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app-attempts.hbs
+++ /dev/null
@@ -1,58 +0,0 @@
-{{!
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-}}
-
-{{breadcrumb-bar breadcrumbs=breadcrumbs}}
-
-<div class="col-md-12 container-fluid">
-  <div class="row">
-
-    <div class="col-md-2 container-fluid">
-      <div class="panel panel-default">
-        <div class="panel-heading">
-          {{#if service}}
-            Service
-          {{else}}
-            Application
-          {{/if}}
-        </div>
-        <div class="panel-body">
-          <ul class="nav nav-pills nav-stacked" id="stacked-menu">
-            <ul class="nav nav-pills nav-stacked collapse in">
-              {{#link-to 'yarn-app' tagName="li"}}
-                {{#link-to 'yarn-app' model.appId (query-params service=service)}}Information
-                {{/link-to}}
-              {{/link-to}}
-              {{#link-to 'yarn-app-attempts' tagName="li"}}
-                {{#link-to 'yarn-app-attempts' model.appId (query-params service=service)}}Attempts List
-                {{/link-to}}
-              {{/link-to}}
-            </ul>
-          </ul>
-        </div>
-      </div>
-    </div>
-
-    <div class="col-md-10 container-fluid">
-      <div class="row">
-         {{timeline-view parent-id="attempt-timeline-div" my-id="timeline-view" height="100%" rmModel=model.attempts label="shortAppAttemptId" attemptModel=true serviceName=service}}
-      </div>
-    </div>
-
-  </div>
-</div>
-{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index c1f5352..2fb5ab3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -18,14 +18,13 @@
 
 {{breadcrumb-bar breadcrumbs=breadcrumbs}}
 
-{{#if model.app}}
 <div class="col-md-12 container-fluid">
   <div class="row">
 
     <div class="col-md-2 container-fluid">
       <div class="panel panel-default">
         <div class="panel-heading">
-          {{#if service}}
+          {{#if serviceName}}
             Service
           {{else}}
             Application
@@ -34,13 +33,14 @@
         <div class="panel-body">
           <ul class="nav nav-pills nav-stacked" id="stacked-menu">
             <ul class="nav nav-pills nav-stacked collapse in">
-              {{#link-to 'yarn-app' tagName="li"}}
-                {{#link-to 'yarn-app' model.app.id (query-params service=service)}}Information
-                {{/link-to}}
+              {{#link-to 'yarn-app.info' tagName="li" class=(if (eq target.currentPath 'yarn-app.info') "active")}}
+                {{#link-to 'yarn-app.info' appId (query-params service=serviceName)}}Information{{/link-to}}
               {{/link-to}}
-              {{#link-to 'yarn-app-attempts' tagName="li"}}
-                {{#link-to 'yarn-app-attempts' model.app.id (query-params service=service)}}Attempts List
-                {{/link-to}}
+              {{#link-to 'yarn-app.attempts' tagName="li" class=(if (eq target.currentPath 'yarn-app.attempts') "active")}}
+                {{#link-to 'yarn-app.attempts' appId (query-params service=serviceName)}}Attempts List{{/link-to}}
+              {{/link-to}}
+              {{#link-to 'yarn-app.charts' tagName="li" class=(if (eq target.currentPath 'yarn-app.charts') "active")}}
+                {{#link-to 'yarn-app.charts' appId (query-params service=serviceName)}}Resource Usage{{/link-to}}
               {{/link-to}}
             </ul>
           </ul>
@@ -49,174 +49,7 @@
     </div>
 
     <div class="col-md-10 container-fluid">
-      <div class="row">
-        <div class="col-md-12">
-          {{app-timeout-bar app=model.app}}
-        </div>
-      </div>
-
-      <div class="row">
-        <div class="col-md-12 container-fluid">
-          <div class="panel panel-default">
-            <div class="panel-heading">Basic Info</div>
-            <div class="x-scroll">
-              <table class="display table table-striped table-bordered"
-                     cellspacing="0" width="100%">
-                <thead>
-                  <tr>
-                    <th>Application ID</th>
-                    <th>Name</th>
-                    <th>User</th>
-                    <th>Queue</th>
-                    <th>State</th>
-                    <th>Final Status</th>
-                    <th>Start Time</th>
-                    <th>Elapsed Time</th>
-                    <th>Finished Time</th>
-                    <th>Priority</th>
-                    <th>Progress</th>
-                    <th>Is Unmanaged AM</th>
-                  </tr>
-                </thead>
-
-                <tbody>
-                  <tr>
-                    <td>{{model.app.id}}</td>
-                    <td>{{model.app.appName}}</td>
-                    <td>{{model.app.user}}</td>
-                    <td>{{model.app.queue}}</td>
-                    <td>{{model.app.state}}</td>
-                    <td>
-                      <span class={{model.app.finalStatusStyle}}>
-                        {{model.app.finalStatus}}
-                      </span>
-                    </td>
-                    <td>{{model.app.startTime}}</td>
-                    <td>{{model.app.formattedElapsedTime}}</td>
-                    <td>{{model.app.validatedFinishedTs}}</td>
-                    <td>{{model.app.priority}}</td>
-                    <td>
-                      <div class="progress" style="margin-bottom: 0;">
-                        <div class="progress-bar" role="progressbar"
-                             aria-valuenow="60" aria-valuemin="0"
-                             aria-valuemax="100"
-                             style={{model.app.progressStyle}}>
-                          {{model.app.progress}}%
-                        </div>
-                      </div>
-                    </td>
-                    <td>{{model.app.unmanagedApplication}}</td>
-                  </tr>
-                </tbody>
-              </table>
-            </div>
-          </div>
-        </div>
-      </div>
-
-      <div class="row">
-        {{#if model.app.diagnostics}}
-          <div class="col-md-12 container-fluid">
-            {{#if model.app.isFailed}}
-              <div class="panel panel-danger">
-                <div class="panel-heading">
-                  Diagnostics
-                </div>
-                <div class="panel-body">{{model.app.diagnostics}}</div>
-              </div>
-            {{else}}
-              <div class="panel panel-default">
-                <div class="panel-heading">
-                  Diagnostics
-                </div>
-                <div class="panel-body">{{model.app.diagnostics}}</div>
-              </div>
-            {{/if}}
-          </div>
-        {{/if}}
-      </div>
-
-      <div class="row">
-        <div class="col-md-8 container-fluid">
-          <div class="panel panel-default">
-            <div class="panel-heading">Scheduling Info</div>
-            <table class="display table table-striped table-bordered"
-                   cellspacing="0" width="100%">
-              <thead>
-              <tr>
-                <th>Allocated Resource</th>
-                <th>Running Containers</th>
-                <th>Preempted Resource</th>
-                <th>Num Non-AM container preempted</th>
-                <th>Num AM container preempted</th>
-                <th>Aggregated Resource Usage</th>
-              </tr>
-              </thead>
-
-              <tbody>
-              <tr>
-                <td>{{model.app.allocatedResource}}</td>
-                <td>{{model.app.runningContainersNumber}}</td>
-                <td>{{model.app.preemptedResource}}</td>
-                <td>{{model.app.numAMContainerPreempted}}</td>
-                <td>{{model.app.numAMContainerPreempted}}</td>
-                <td>{{model.app.aggregatedResourceUsage}}</td>
-              </tr>
-              </tbody>
-            </table>
-          </div>
-        </div>
-
-        <div class="col-md-4 container-fluid">
-          <div class="panel panel-default">
-            <div class="panel-heading">Application Master Info</div>
-            <table class="display table table-striped table-bordered"
-                   cellspacing="0" width="100%">
-              <thead>
-              <tr>
-                <th>Master Container Log</th>
-                <th>Master Node</th>
-                <th>Master Node Label Expression</th>
-              </tr>
-              </thead>
-
-              <tbody>
-              <tr>
-                <td><a href="{{model.app.amContainerLogs}}" target="_blank">Link</a></td>
-                <td><a href="{{amHostHttpAddressFormatted}}" target="_blank">Link</a></td>
-                <td>{{model.app.amNodeLabelExpression}}</td>
-              </tr>
-              </tbody>
-            </table>
-          </div>
-        </div>
-      </div>
-
-      {{#if model.nodes}}
-        {{#if model.rmContainers}}
-          <div class="row" id="stackd-bar-chart-mem">
-            {{per-app-memusage-by-nodes-stacked-barchart
-            nodes=model.nodes
-            rmContainers=model.rmContainers
-            parentId="stackd-bar-chart-mem"
-            title=(concat 'Memory usage by nodes for: [' model.app.id ']')}}
-          </div>
-
-          <hr>
-
-          <div class="row" id="stackd-bar-chart-ncontainer">
-            {{per-app-ncontainers-by-nodes-stacked-barchart
-            nodes=model.nodes
-            rmContainers=model.rmContainers
-            parentId="stackd-bar-chart-ncontainer"
-            title=(concat 'Running #Containers by nodes for: [' model.app.id ']')}}
-          </div>
-        {{/if}}
-      {{/if}}
-
-
+      {{outlet}}
     </div>
   </div>
 </div>
-{{/if}}
-{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
new file mode 100644
index 0000000..81896e2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/attempts.hbs
@@ -0,0 +1,29 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+  {{timeline-view
+    parent-id="attempt-timeline-div"
+    my-id="timeline-view"
+    height="100%"
+    rmModel=model.attempts
+    label="shortAppAttemptId"
+    attemptModel=true
+    serviceName=model.serviceName
+  }}
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/charts.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/charts.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/charts.hbs
new file mode 100644
index 0000000..8d3388a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/charts.hbs
@@ -0,0 +1,43 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+  <div class="col-md-12 container-fluid">
+    {{#if isRunningApp}}
+      <div class="row" id="stackd-bar-chart-mem">
+        {{per-app-memusage-by-nodes-stacked-barchart
+        nodes=model.nodes
+        rmContainers=model.rmContainers
+        parentId="stackd-bar-chart-mem"
+        title=(concat 'Memory usage by nodes for: [' model.appId ']')}}
+      </div>
+      <hr>
+      <div class="row" id="stackd-bar-chart-ncontainer">
+        {{per-app-ncontainers-by-nodes-stacked-barchart
+        nodes=model.nodes
+        rmContainers=model.rmContainers
+        parentId="stackd-bar-chart-ncontainer"
+        title=(concat 'Running #Containers by nodes for: [' model.appId ']')}}
+      </div>
+    {{else}}
+      <div class="panel panel-default">
+        <h4 class="text-center">No resource usage data is available for this application!</h4>
+      </div>
+    {{/if}}
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
new file mode 100644
index 0000000..3cfec33
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
@@ -0,0 +1,167 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+  <div class="col-md-12">
+    {{app-timeout-bar app=model.app}}
+  </div>
+</div>
+
+<div class="row">
+  <div class="col-md-12 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Basic Info
+      </div>
+      <div class="x-scroll">
+        <table class="display table table-striped table-bordered"
+               cellspacing="0" width="100%">
+          <thead>
+            <tr>
+              <th>Application ID</th>
+              <th>Name</th>
+              <th>User</th>
+              <th>Queue</th>
+              <th>State</th>
+              <th>Final Status</th>
+              <th>Start Time</th>
+              <th>Elapsed Time</th>
+              <th>Finished Time</th>
+              <th>Priority</th>
+              {{#unless model.serviceName}}
+                <th>Progress</th>
+                <th>Is Unmanaged AM</th>
+              {{/unless}}
+            </tr>
+          </thead>
+
+          <tbody>
+            <tr>
+              <td>{{model.app.id}}</td>
+              <td>{{model.app.appName}}</td>
+              <td>{{model.app.user}}</td>
+              <td>{{model.app.queue}}</td>
+              <td>{{model.app.state}}</td>
+              <td>
+                <span class={{model.app.finalStatusStyle}}>
+                  {{model.app.finalStatus}}
+                </span>
+              </td>
+              <td>{{model.app.startTime}}</td>
+              <td>{{model.app.formattedElapsedTime}}</td>
+              <td>{{model.app.validatedFinishedTs}}</td>
+              <td>{{model.app.priority}}</td>
+              {{#unless model.serviceName}}
+                <td>
+                  <div class="progress" style="margin-bottom: 0;">
+                    <div class="progress-bar" role="progressbar"
+                     aria-valuenow="60" aria-valuemin="0"
+                     aria-valuemax="100"
+                     style={{model.app.progressStyle}}>
+                    {{model.app.progress}}%
+                    </div>
+                  </div>
+                </td>
+                <td>{{model.app.unmanagedApplication}}</td>
+              {{/unless}}
+            </tr>
+          </tbody>
+        </table>
+      </div>
+    </div>
+  </div>
+</div>
+
+<div class="row">
+  {{#if model.app.diagnostics}}
+    <div class="col-md-12 container-fluid">
+      {{#if model.app.isFailed}}
+        <div class="panel panel-danger">
+          <div class="panel-heading">
+            Diagnostics
+          </div>
+          <div class="panel-body">{{model.app.diagnostics}}</div>
+        </div>
+      {{else}}
+        <div class="panel panel-default">
+          <div class="panel-heading">
+            Diagnostics
+          </div>
+          <div class="panel-body">{{model.app.diagnostics}}</div>
+        </div>
+      {{/if}}
+    </div>
+  {{/if}}
+</div>
+
+<div class="row">
+  <div class="col-md-12 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">Scheduling Info</div>
+      <table class="display table table-striped table-bordered"
+             cellspacing="0" width="100%">
+        <thead>
+          <tr>
+            <th>Allocated Resource</th>
+            <th>Running Containers</th>
+            <th>Preempted Resource</th>
+            <th>Num Non-AM container preempted</th>
+            <th>Num AM container preempted</th>
+            <th>Aggregated Resource Usage</th>
+          </tr>
+        </thead>
+        <tbody>
+          <tr>
+            <td>{{model.app.allocatedResource}}</td>
+            <td>{{model.app.runningContainersNumber}}</td>
+            <td>{{model.app.preemptedResource}}</td>
+            <td>{{model.app.numNonAMContainerPreempted}}</td>
+            <td>{{model.app.numAMContainerPreempted}}</td>
+            <td>{{model.app.aggregatedResourceUsage}}</td>
+          </tr>
+        </tbody>
+      </table>
+    </div>
+  </div>
+
+</div>
+
+<div class="row">
+  <div class="col-md-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">Application Master Info</div>
+      <table class="display table table-striped table-bordered"
+             cellspacing="0" width="100%">
+        <thead>
+          <tr>
+            <th>Master Container Log</th>
+            <th>Master Node</th>
+            <th>Master Node Label Expression</th>
+          </tr>
+        </thead>
+        <tbody>
+          <tr>
+            <td><a href="{{model.app.amContainerLogs}}" target="_blank">Link</a></td>
+            <td><a href="{{amHostHttpAddressFormatted}}" target="_blank">Link</a></td>
+            <td>{{model.app.amNodeLabelExpression}}</td>
+          </tr>
+        </tbody>
+      </table>
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/loading.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/loading.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/loading.hbs
new file mode 100644
index 0000000..a95af2b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/loading.hbs
@@ -0,0 +1,23 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="col-md-12 container-fluid">
+  <div class="loading-mask">
+    <img src="assets/images/spinner.gif" alt="Loading...">
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps.hbs
index 264e6b6..00701d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps.hbs
@@ -24,14 +24,13 @@
     <div class="col-md-2 container-fluid">
       <div class="panel panel-default">
         <div class="panel-heading">
-          <h4>Applications</h4>
+          Applications
         </div>
         <div class="panel-body">
           <ul class="nav nav-pills nav-stacked" id="stacked-menu">
             <ul class="nav nav-pills nav-stacked collapse in">
               {{#link-to 'yarn-apps.apps' tagName="li"}}
-                {{#link-to 'yarn-apps.apps'}}All Applications
-                {{/link-to}}
+                {{#link-to 'yarn-apps.apps'}}All Applications{{/link-to}}
               {{/link-to}}
             </ul>
           </ul>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps/services.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps/services.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps/services.hbs
deleted file mode 100644
index d56762c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-apps/services.hbs
+++ /dev/null
@@ -1,25 +0,0 @@
-{{!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
---}}
-
-{{#if model.apps}}
-  {{em-table columns=columns rows=model.apps definition=tableDefinition}}
-{{else}}
-  <h4 align="center">Could not find any applications from this cluster</h4>
-{{/if}}
-
-{{outlet}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
index 9637a93..04788be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
@@ -31,8 +31,7 @@
           <ul class="nav nav-pills nav-stacked" id="stacked-menu">
             <ul class="nav nav-pills nav-stacked collapse in">
               {{#link-to 'yarn-services' tagName="li"}}
-                {{#link-to 'yarn-services'}}Long Running Services
-                {{/link-to}}
+                {{#link-to 'yarn-services'}}Long Running Services{{/link-to}}
               {{/link-to}}
             </ul>
           </ul>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app-attempts-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app-attempts-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app-attempts-test.js
deleted file mode 100644
index 3894db2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app-attempts-test.js
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleFor, test } from 'ember-qunit';
-
-moduleFor('controller:yarn-app-attempts', 'Unit | Controller | yarn app attempts', {
-  // Specify the other units that are required for this test.
-  // needs: ['controller:foo']
-});
-
-// Replace this with your real tests.
-test('it exists', function(assert) {
-  let controller = this.subject();
-  assert.ok(controller);
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/attempts-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/attempts-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/attempts-test.js
new file mode 100644
index 0000000..b8bad85
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/attempts-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-app/attempts', 'Unit | Controller | yarn app/attempts', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/charts-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/charts-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/charts-test.js
new file mode 100644
index 0000000..91acb6c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/charts-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-app/charts', 'Unit | Controller | yarn app/charts', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/info-test.js
new file mode 100644
index 0000000..910d3ef
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/info-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-app/info', 'Unit | Controller | yarn app/info', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-apps/services-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-apps/services-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-apps/services-test.js
deleted file mode 100644
index d0a1f85..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-apps/services-test.js
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleFor, test } from 'ember-qunit';
-
-moduleFor('controller:yarn-apps/services', 'Unit | Controller | yarn apps/services', {
-  // Specify the other units that are required for this test.
-  // needs: ['controller:foo']
-});
-
-// Replace this with your real tests.
-test('it exists', function(assert) {
-  let controller = this.subject();
-  assert.ok(controller);
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app-attempts-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app-attempts-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app-attempts-test.js
deleted file mode 100644
index 917a159..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app-attempts-test.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { moduleFor, test } from 'ember-qunit';
-
-moduleFor('route:yarn-app-attempts', 'Unit | Route | yarn app attempts', {
-  // Specify the other units that are required for this test.
-  // needs: ['controller:foo']
-});
-
-test('it exists', function(assert) {
-  let route = this.subject();
-  assert.ok(route);
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/attempts-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/attempts-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/attempts-test.js
new file mode 100644
index 0000000..eaf2f65
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/attempts-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-app/attempts', 'Unit | Route | yarn app/attempts', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/charts-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/charts-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/charts-test.js
new file mode 100644
index 0000000..1284c6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/charts-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-app/charts', 'Unit | Route | yarn app/charts', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/info-test.js
new file mode 100644
index 0000000..f7dd6c8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/info-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-app/info', 'Unit | Route | yarn app/info', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});




[48/50] [abbrv] hadoop git commit: YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store

Posted by xg...@apache.org.
YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/630ee7e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/630ee7e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/630ee7e7

Branch: refs/heads/YARN-5734
Commit: 630ee7e7d95daaf19a700d57acaf895f2dea9c72
Parents: ae9fc25
Author: Jonathan Hung <jh...@linkedin.com>
Authored: Wed Mar 1 16:03:01 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Jun 5 14:03:16 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  6 ++
 .../src/main/resources/yarn-default.xml         | 12 +++
 .../scheduler/MutableConfigurationProvider.java | 35 ++++++++
 .../scheduler/capacity/CapacityScheduler.java   | 14 ++-
 .../CapacitySchedulerConfiguration.java         |  3 +
 .../capacity/conf/CSConfigurationProvider.java  |  3 +-
 .../conf/MutableCSConfigurationProvider.java    | 94 ++++++++++++++++++++
 .../conf/YarnConfigurationStoreFactory.java     | 46 ++++++++++
 .../TestMutableCSConfigurationProvider.java     | 83 +++++++++++++++++
 9 files changed, 291 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5e4c826..fa9e79d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -607,6 +607,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
       "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
+      YARN_PREFIX + "scheduler.configuration.store.class";
+  public static final String MEMORY_CONFIGURATION_STORE = "memory";
+  public static final String DEFAULT_CONFIGURATION_STORE =
+      MEMORY_CONFIGURATION_STORE;
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
       + "authorization-provider";
   private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e687eef..02284e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3108,4 +3108,16 @@
     <value>false</value>
   </property>
 
+  <property>
+    <description>
+      The type of configuration store to use for storing scheduler
+      configurations, if using a mutable configuration provider.
+      Keywords such as "memory" map to certain configuration store
+      implementations. If the keyword is not found, the value is
+      loaded as a fully qualified class name.
+    </description>
+    <name>yarn.scheduler.configuration.store.class</name>
+    <value>memory</value>
+  </property>
+
 </configuration>
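
For illustration only (not part of this commit), the new property can also be
set programmatically through the constants added to YarnConfiguration above.
A minimal sketch, assuming hadoop-yarn-api is on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class StoreClassExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "memory" is the only keyword this commit defines; any other value
        // is treated as the name of a store class to load.
        conf.set(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
            YarnConfiguration.MEMORY_CONFIGURATION_STORE);
        System.out.println(conf.get(
            YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS));
      }
    }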

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
new file mode 100644
index 0000000..da30a2b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.Map;
+
+/**
+ * Interface for allowing changes to scheduler configurations.
+ */
+public interface MutableConfigurationProvider {
+
+  /**
+   * Update the scheduler configuration with the provided key-value pairs.
+   * @param user User issuing the request
+   * @param confUpdate Key-value pairs for configurations to be updated.
+   */
+  void mutateConfiguration(String user, Map<String, String> confUpdate);
+
+}
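
A caller-side sketch of the new interface (illustrative only: the "admin"
user and the capacity key below are assumptions, not part of the commit).
Note that at this point in the branch the method takes the user as a plain
String; a later commit in this series changes that:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;

    public class MutationCaller {
      // Applies a single illustrative capacity-scheduler change as "admin".
      static void raiseMaxCapacity(MutableConfigurationProvider provider) {
        Map<String, String> update = new HashMap<>();
        update.put("yarn.scheduler.capacity.root.default.maximum-capacity",
            "50");
        provider.mutateConfiguration("admin", update);
      }
    }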

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index a925904..9353df2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.MutableCSConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -290,10 +291,15 @@ public class CapacityScheduler extends
       String confProviderStr = configuration.get(
           CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
           CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
-      if (confProviderStr.equals(
-          CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
-        this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
-      } else {
+      switch (confProviderStr) {
+      case CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER:
+        this.csConfProvider =
+            new FileBasedCSConfigurationProvider(rmContext);
+        break;
+      case CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER:
+        this.csConfProvider = new MutableCSConfigurationProvider(rmContext);
+        break;
+      default:
         throw new IOException("Invalid CS configuration provider: " +
             confProviderStr);
       }
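
To exercise the new "store" branch, the provider is selected through the
constants shown in the CapacitySchedulerConfiguration diff below. A hedged
sketch (the surrounding scheduler setup is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

    public class ProviderSelection {
      static Configuration storeBackedConf() {
        Configuration conf = new Configuration();
        // Selects MutableCSConfigurationProvider instead of the file-based
        // provider when the CapacityScheduler initializes.
        conf.set(CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
            CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER);
        return conf;
      }
    }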

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 6dd7aef..de72669 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -300,6 +300,9 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   public static final String FILE_CS_CONF_PROVIDER = "file";
 
   @Private
+  public static final String STORE_CS_CONF_PROVIDER = "store";
+
+  @Private
   public static final String DEFAULT_CS_CONF_PROVIDER = FILE_CS_CONF_PROVIDER;
 
   AppPriorityACLConfigurationParser priorityACLConfig = new AppPriorityACLConfigurationParser();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
index c9984ac..0d2c8bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
@@ -32,8 +32,9 @@ public interface CSConfigurationProvider {
   /**
    * Initialize the configuration provider with given conf.
    * @param conf configuration to initialize with
+   * @throws IOException if initialization fails due to misconfiguration
    */
-  void init(Configuration conf);
+  void init(Configuration conf) throws IOException;
 
   /**
    * Loads capacity scheduler configuration object.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
new file mode 100644
index 0000000..267ab6a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * CS configuration provider which implements
+ * {@link MutableConfigurationProvider} for modifying capacity scheduler
+ * configuration.
+ */
+public class MutableCSConfigurationProvider implements CSConfigurationProvider,
+    MutableConfigurationProvider {
+
+  private Configuration schedConf;
+  private YarnConfigurationStore confStore;
+  private RMContext rmContext;
+  private Configuration conf;
+
+  public MutableCSConfigurationProvider(RMContext rmContext) {
+    this.rmContext = rmContext;
+  }
+
+  @Override
+  public void init(Configuration config) throws IOException {
+    String store = config.get(
+        YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
+        YarnConfiguration.DEFAULT_CONFIGURATION_STORE);
+    switch (store) {
+    case YarnConfiguration.MEMORY_CONFIGURATION_STORE:
+      this.confStore = new InMemoryConfigurationStore();
+      break;
+    default:
+      this.confStore = YarnConfigurationStoreFactory.getStore(config);
+      break;
+    }
+    Configuration initialSchedConf = new Configuration(false);
+    initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
+    this.schedConf = initialSchedConf;
+    confStore.initialize(config, initialSchedConf);
+    this.conf = config;
+  }
+
+  @Override
+  public CapacitySchedulerConfiguration loadConfiguration(Configuration
+      configuration) throws IOException {
+    Configuration loadedConf = new Configuration(configuration);
+    loadedConf.addResource(schedConf);
+    return new CapacitySchedulerConfiguration(loadedConf, false);
+  }
+
+  @Override
+  public void mutateConfiguration(String user,
+      Map<String, String> confUpdate) {
+    Configuration oldConf = new Configuration(schedConf);
+    LogMutation log = new LogMutation(confUpdate, user);
+    long id = confStore.logMutation(log);
+    for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
+      schedConf.set(kv.getKey(), kv.getValue());
+    }
+    try {
+      rmContext.getScheduler().reinitialize(conf, rmContext);
+    } catch (IOException e) {
+      schedConf = oldConf;
+      confStore.confirmMutation(id, false);
+      return;
+    }
+    confStore.confirmMutation(id, true);
+  }
+}
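
The mutation path above follows a log-then-confirm pattern: the update is
logged to the store, applied to schedConf, the scheduler is reinitialized,
and only then is the mutation confirmed; on failure the old configuration is
restored and the mutation is confirmed as rejected. A standalone sketch of
that contract (this is not the Hadoop YarnConfigurationStore API, only an
illustration of the pattern):

    import java.util.HashMap;
    import java.util.Map;

    public class LogThenConfirmSketch {
      private final Map<Long, Map<String, String>> pending = new HashMap<>();
      private long nextId = 0;

      // Record a proposed mutation and return its id (cf. logMutation).
      synchronized long logMutation(Map<String, String> update) {
        pending.put(nextId, new HashMap<>(update));
        return nextId++;
      }

      // Mark the mutation applied or rolled back (cf. confirmMutation).
      synchronized void confirmMutation(long id, boolean accepted) {
        Map<String, String> update = pending.remove(id);
        System.out.println("mutation " + id
            + (accepted ? " applied: " : " rolled back: ") + update);
      }
    }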

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
new file mode 100644
index 0000000..60249c8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Factory class for creating instances of {@link YarnConfigurationStore}.
+ */
+public final class YarnConfigurationStoreFactory {
+
+  private static final Log LOG = LogFactory.getLog(
+      YarnConfigurationStoreFactory.class);
+
+  private YarnConfigurationStoreFactory() {
+    // Unused.
+  }
+
+  public static YarnConfigurationStore getStore(Configuration conf) {
+    Class<? extends YarnConfigurationStore> storeClass =
+        conf.getClass(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
+            InMemoryConfigurationStore.class, YarnConfigurationStore.class);
+    LOG.info("Using YarnConfigurationStore implementation - " + storeClass);
+    return ReflectionUtils.newInstance(storeClass, conf);
+  }
+}
\ No newline at end of file
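
The factory makes the store pluggable. A minimal sketch of resolving a store
through it, assuming the store classes are visible from the caller's package
(InMemoryConfigurationStore is the implementation this series ships):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class StoreFactoryExample {
      static YarnConfigurationStore loadStore() {
        Configuration conf = new Configuration();
        // Any YarnConfigurationStore subclass can be plugged in here.
        conf.setClass(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
            InMemoryConfigurationStore.class, YarnConfigurationStore.class);
        return YarnConfigurationStoreFactory.getStore(conf);
      }
    }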

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630ee7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
new file mode 100644
index 0000000..3f103b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests {@link MutableCSConfigurationProvider}.
+ */
+public class TestMutableCSConfigurationProvider {
+
+  private MutableCSConfigurationProvider confProvider;
+  private RMContext rmContext;
+  private Map<String, String> goodUpdate;
+  private Map<String, String> badUpdate;
+  private CapacityScheduler cs;
+
+  private static final String TEST_USER = "testUser";
+
+  @Before
+  public void setUp() {
+    cs = mock(CapacityScheduler.class);
+    rmContext = mock(RMContext.class);
+    when(rmContext.getScheduler()).thenReturn(cs);
+    confProvider = new MutableCSConfigurationProvider(rmContext);
+    goodUpdate = new HashMap<>();
+    goodUpdate.put("goodKey", "goodVal");
+    badUpdate = new HashMap<>();
+    badUpdate.put("badKey", "badVal");
+  }
+
+  @Test
+  public void testInMemoryBackedProvider() throws IOException {
+    Configuration conf = new Configuration();
+    confProvider.init(conf);
+    assertNull(confProvider.loadConfiguration(conf)
+        .get("goodKey"));
+
+    doNothing().when(cs).reinitialize(any(Configuration.class),
+        any(RMContext.class));
+    confProvider.mutateConfiguration(TEST_USER, goodUpdate);
+    assertEquals("goodVal", confProvider.loadConfiguration(conf)
+        .get("goodKey"));
+
+    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+    doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
+        any(RMContext.class));
+    confProvider.mutateConfiguration(TEST_USER, badUpdate);
+    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/50] [abbrv] hadoop git commit: HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. Contributed by Mingliang Liu.

Posted by xg...@apache.org.
HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07e60f85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07e60f85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07e60f85

Branch: refs/heads/YARN-5734
Commit: 07e60f85d87ca9a585d351a308ee0ecfa9293750
Parents: d4015f8
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 30 15:11:10 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 30 15:11:10 2017 +0900

----------------------------------------------------------------------
 .../fs/aliyun/oss/TestAliyunOSSFileSystemContract.java   | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07e60f85/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index 419ddee..321e958 100644
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -20,17 +20,22 @@ package org.apache.hadoop.fs.aliyun.oss;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
+
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.junit.Assume.*;
-import org.apache.hadoop.fs.FileStatus;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+import static org.junit.Assume.assumeTrue;
+
 /**
  * Tests a live Aliyun OSS system.
  *


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[50/50] [abbrv] hadoop git commit: YARN-5949. Add pluggable configuration ACL policy interface and implementation. (Jonathan Hung via wangda)

Posted by xg...@apache.org.
YARN-5949. Add pluggable configuration ACL policy interface and implementation. (Jonathan Hung via wangda)

Change-Id: Ib98e82ff753bede21fcab2e6ca9ec1e7a5a2008f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac788fd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac788fd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac788fd4

Branch: refs/heads/YARN-5734
Commit: ac788fd475eabf1db5c2d5b5ad00244cf7630335
Parents: 7003188
Author: Wangda Tan <wa...@apache.org>
Authored: Mon May 22 13:38:31 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jun 5 14:03:18 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   3 +
 .../src/main/resources/yarn-default.xml         |  11 ++
 .../ConfigurationMutationACLPolicy.java         |  47 ++++++
 .../ConfigurationMutationACLPolicyFactory.java  |  49 ++++++
 .../DefaultConfigurationMutationACLPolicy.java  |  45 ++++++
 .../scheduler/MutableConfScheduler.java         |  19 ++-
 .../scheduler/MutableConfigurationProvider.java |   8 +-
 .../scheduler/capacity/CapacityScheduler.java   |   6 +-
 .../conf/MutableCSConfigurationProvider.java    | 151 +++++++++++++++++-
 ...ueueAdminConfigurationMutationACLPolicy.java |  96 ++++++++++++
 .../resourcemanager/webapp/RMWebServices.java   | 132 +---------------
 .../TestConfigurationMutationACLPolicies.java   | 154 +++++++++++++++++++
 .../TestMutableCSConfigurationProvider.java     |  40 +++--
 13 files changed, 610 insertions(+), 151 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index fa9e79d..6e0cd32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -613,6 +613,9 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_CONFIGURATION_STORE =
       MEMORY_CONFIGURATION_STORE;
 
+  public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
+      YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
       + "authorization-provider";
   private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 02284e6..5f2769e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3120,4 +3120,15 @@
     <value>memory</value>
   </property>
 
+  <property>
+    <description>
+      The class to use for configuration mutation ACL policy if using a mutable
+      configuration provider. Controls whether a mutation request is allowed.
+      The DefaultConfigurationMutationACLPolicy checks if the requestor is a
+      YARN admin.
+    </description>
+    <name>yarn.scheduler.configuration.mutation.acl-policy.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.DefaultConfigurationMutationACLPolicy</value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..724487b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+/**
+ * Interface for determining whether configuration mutations are allowed.
+ */
+public interface ConfigurationMutationACLPolicy {
+
+  /**
+   * Initialize ACL policy with configuration and RMContext.
+   * @param conf Configuration to initialize with.
+   * @param rmContext rmContext
+   */
+  void init(Configuration conf, RMContext rmContext);
+
+  /**
+   * Check if mutation is allowed.
+   * @param user User issuing the request
+   * @param confUpdate configurations to be updated
+   * @return whether provided mutation is allowed or not
+   */
+  boolean isMutationAllowed(UserGroupInformation user, QueueConfigsUpdateInfo
+      confUpdate);
+
+}
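
A minimal custom policy sketch against the new interface (hypothetical and
illustrative; an allow-all policy like this would only make sense on a test
cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;

    /** Hypothetical allow-all policy; not part of this commit. */
    public class PermissiveMutationACLPolicy
        implements ConfigurationMutationACLPolicy {

      @Override
      public void init(Configuration conf, RMContext rmContext) {
        // No state needed; every mutation is allowed.
      }

      @Override
      public boolean isMutationAllowed(UserGroupInformation user,
          QueueConfigsUpdateInfo confUpdate) {
        return true;
      }
    }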

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
new file mode 100644
index 0000000..2898785
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Factory class for creating instances of
+ * {@link ConfigurationMutationACLPolicy}.
+ */
+public final class ConfigurationMutationACLPolicyFactory {
+
+  private static final Log LOG = LogFactory.getLog(
+      ConfigurationMutationACLPolicyFactory.class);
+
+  private ConfigurationMutationACLPolicyFactory() {
+    // Unused.
+  }
+
+  public static ConfigurationMutationACLPolicy getPolicy(Configuration conf) {
+    Class<? extends ConfigurationMutationACLPolicy> policyClass =
+        conf.getClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+            DefaultConfigurationMutationACLPolicy.class,
+            ConfigurationMutationACLPolicy.class);
+    LOG.info("Using ConfigurationMutationACLPolicy implementation - " +
+        policyClass);
+    return ReflectionUtils.newInstance(policyClass, conf);
+  }
+}
\ No newline at end of file
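
Wiring a policy through the factory, using only constants and classes from
this commit (a sketch; the surrounding RM setup is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicyFactory;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;

    public class PolicySelection {
      static ConfigurationMutationACLPolicy queueAdminPolicy() {
        Configuration conf = new Configuration();
        // Replace the default admin-only policy with the per-queue one.
        conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
            QueueAdminConfigurationMutationACLPolicy.class,
            ConfigurationMutationACLPolicy.class);
        return ConfigurationMutationACLPolicyFactory.getPolicy(conf);
      }
    }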

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..680c3b8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+/**
+ * Default configuration mutation ACL policy. Checks if user is YARN admin.
+ */
+public class DefaultConfigurationMutationACLPolicy implements
+    ConfigurationMutationACLPolicy {
+
+  private YarnAuthorizationProvider authorizer;
+
+  @Override
+  public void init(Configuration conf, RMContext rmContext) {
+    authorizer = YarnAuthorizationProvider.getInstance(conf);
+  }
+
+  @Override
+  public boolean isMutationAllowed(UserGroupInformation user,
+      QueueConfigsUpdateInfo confUpdate) {
+    return authorizer.isAdmin(user);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
index 35e36e1..93a935e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -17,10 +17,11 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 
 import java.io.IOException;
-import java.util.Map;
 
 /**
  * Interface for a scheduler that supports changing configuration at runtime.
@@ -31,10 +32,22 @@ public interface MutableConfScheduler extends ResourceScheduler {
   /**
    * Update the scheduler's configuration.
    * @param user Caller of this update
-   * @param confUpdate key-value map of the configuration update
+   * @param confUpdate configuration update
    * @throws IOException if update is invalid
    */
   void updateConfiguration(UserGroupInformation user,
-      Map<String, String> confUpdate) throws IOException;
+      QueueConfigsUpdateInfo confUpdate) throws IOException;
 
+  /**
+   * Get the scheduler configuration.
+   * @return the scheduler configuration
+   */
+  Configuration getConfiguration();
+
+  /**
+   * Get queue object based on queue name.
+   * @param queueName the queue name
+   * @return the queue object
+   */
+  Queue getQueue(String queueName);
 }
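
A caller-side sketch of the widened interface, mirroring how CapacityScheduler
uses it below (illustrative; constructing the QueueConfigsUpdateInfo itself is
out of scope here):

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;

    public class SchedulerMutation {
      static void applyUpdate(ResourceScheduler scheduler,
          QueueConfigsUpdateInfo update) throws IOException {
        if (scheduler instanceof MutableConfScheduler) {
          ((MutableConfScheduler) scheduler).updateConfiguration(
              UserGroupInformation.getCurrentUser(), update);
        } else {
          throw new IOException("Scheduler does not support mutation");
        }
      }
    }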

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 889c3bc..f04c128 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
 import java.io.IOException;
-import java.util.Map;
 
 /**
  * Interface for allowing changes to scheduler configurations.
@@ -32,7 +34,7 @@ public interface MutableConfigurationProvider {
    * @param confUpdate Key-value pairs for configurations to be updated.
    * @throws IOException if scheduler could not be reinitialized
    */
-  void mutateConfiguration(String user, Map<String, String> confUpdate)
-      throws IOException;
+  void mutateConfiguration(UserGroupInformation user, QueueConfigsUpdateInfo
+      confUpdate) throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 85138d5..a0e7391 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -136,6 +136,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -619,6 +620,7 @@ public class CapacityScheduler extends
     preemptionManager.refreshQueues(null, this.getRootQueue());
   }
 
+  @Override
   public CSQueue getQueue(String queueName) {
     if (queueName == null) {
       return null;
@@ -2491,10 +2493,10 @@ public class CapacityScheduler extends
 
   @Override
   public void updateConfiguration(UserGroupInformation user,
-      Map<String, String> confUpdate) throws IOException {
+      QueueConfigsUpdateInfo confUpdate) throws IOException {
     if (csConfProvider instanceof MutableConfigurationProvider) {
       ((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
-          user.getShortUserName(), confUpdate);
+          user, confUpdate);
     } else {
       throw new UnsupportedOperationException("Configured CS configuration " +
           "provider does not support updating configuration.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index ea1b3c0..8b879b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -18,14 +18,27 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
 
+import com.google.common.base.Joiner;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicyFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -38,6 +51,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
 
   private Configuration schedConf;
   private YarnConfigurationStore confStore;
+  private ConfigurationMutationACLPolicy aclMutationPolicy;
   private RMContext rmContext;
   private Configuration conf;
 
@@ -68,6 +82,9 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
       schedConf.set(kv.getKey(), kv.getValue());
     }
     confStore.initialize(config, schedConf);
+    this.aclMutationPolicy = ConfigurationMutationACLPolicyFactory
+        .getPolicy(config);
+    aclMutationPolicy.init(config, rmContext);
     this.conf = config;
   }
 
@@ -80,12 +97,17 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
   }
 
   @Override
-  public void mutateConfiguration(String user,
-      Map<String, String> confUpdate) throws IOException {
+  public void mutateConfiguration(UserGroupInformation user,
+      QueueConfigsUpdateInfo confUpdate) throws IOException {
+    if (!aclMutationPolicy.isMutationAllowed(user, confUpdate)) {
+      throw new AccessControlException("User is not admin of all modified" +
+          " queues.");
+    }
     Configuration oldConf = new Configuration(schedConf);
-    LogMutation log = new LogMutation(confUpdate, user);
+    Map<String, String> kvUpdate = constructKeyValueConfUpdate(confUpdate);
+    LogMutation log = new LogMutation(kvUpdate, user.getShortUserName());
     long id = confStore.logMutation(log);
-    for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
+    for (Map.Entry<String, String> kv : kvUpdate.entrySet()) {
       if (kv.getValue() == null) {
         schedConf.unset(kv.getKey());
       } else {
@@ -101,4 +123,125 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
     }
     confStore.confirmMutation(id, true);
   }
+
+
+  private Map<String, String> constructKeyValueConfUpdate(
+      QueueConfigsUpdateInfo mutationInfo) throws IOException {
+    CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+    CapacitySchedulerConfiguration proposedConf =
+        new CapacitySchedulerConfiguration(cs.getConfiguration(), false);
+    Map<String, String> confUpdate = new HashMap<>();
+    for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
+      removeQueue(queueToRemove, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
+      addQueue(addQueueInfo, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
+      updateQueue(updateQueueInfo, proposedConf, confUpdate);
+    }
+    return confUpdate;
+  }
+
+  private void removeQueue(
+      String queueToRemove, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (queueToRemove == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+      String queueName = queueToRemove.substring(
+          queueToRemove.lastIndexOf('.') + 1);
+      CSQueue queue = cs.getQueue(queueName);
+      if (queue == null ||
+          !queue.getQueuePath().equals(queueToRemove)) {
+        throw new IOException("Queue " + queueToRemove + " not found");
+      } else if (queueToRemove.lastIndexOf('.') == -1) {
+        throw new IOException("Can't remove queue " + queueToRemove);
+      }
+      String parentQueuePath = queueToRemove.substring(0, queueToRemove
+          .lastIndexOf('.'));
+      String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
+      List<String> newSiblingQueues = new ArrayList<>();
+      for (String siblingQueue : siblingQueues) {
+        if (!siblingQueue.equals(queueName)) {
+          newSiblingQueues.add(siblingQueue);
+        }
+      }
+      proposedConf.setQueues(parentQueuePath, newSiblingQueues
+          .toArray(new String[0]));
+      String queuesConfig = CapacitySchedulerConfiguration.PREFIX
+          + parentQueuePath + CapacitySchedulerConfiguration.DOT
+          + CapacitySchedulerConfiguration.QUEUES;
+      if (newSiblingQueues.size() == 0) {
+        confUpdate.put(queuesConfig, null);
+      } else {
+        confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
+      }
+      for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
+          ".*" + queueToRemove.replaceAll("\\.", "\\.") + "\\..*")
+          .entrySet()) {
+        proposedConf.unset(confRemove.getKey());
+        confUpdate.put(confRemove.getKey(), null);
+      }
+    }
+  }
+
+  private void addQueue(
+      QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (addInfo == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+      String queuePath = addInfo.getQueue();
+      String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
+      if (cs.getQueue(queueName) != null) {
+        throw new IOException("Can't add existing queue " + queuePath);
+      } else if (queuePath.lastIndexOf('.') == -1) {
+        throw new IOException("Can't add invalid queue " + queuePath);
+      }
+      String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
+      String[] siblings = proposedConf.getQueues(parentQueue);
+      List<String> siblingQueues = siblings == null ? new ArrayList<>() :
+          new ArrayList<>(Arrays.<String>asList(siblings));
+      siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
+      proposedConf.setQueues(parentQueue,
+          siblingQueues.toArray(new String[0]));
+      confUpdate.put(CapacitySchedulerConfiguration.PREFIX
+              + parentQueue + CapacitySchedulerConfiguration.DOT
+              + CapacitySchedulerConfiguration.QUEUES,
+          Joiner.on(',').join(siblingQueues));
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX
+          + queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
+
+  private void updateQueue(QueueConfigInfo updateInfo,
+      CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) {
+    if (updateInfo == null) {
+      return;
+    } else {
+      String queuePath = updateInfo.getQueue();
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX
+          + queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
 }
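
For intuition about constructKeyValueConfUpdate above, a worked example with
illustrative names: removing queue root.a.b while root.a has children b and c
produces a confUpdate along these lines (a null value means the key is unset,
as in mutateConfiguration above):

    yarn.scheduler.capacity.root.a.queues = c
    yarn.scheduler.capacity.root.a.b.capacity = null
    ... every other key under root.a.b maps to null as well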

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..1f94c1c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * A configuration mutation ACL policy which checks that the user has
+ * admin privileges on all queues they are changing.
+ */
+public class QueueAdminConfigurationMutationACLPolicy implements
+    ConfigurationMutationACLPolicy {
+
+  private RMContext rmContext;
+
+  @Override
+  public void init(Configuration conf, RMContext context) {
+    this.rmContext = context;
+  }
+
+  @Override
+  public boolean isMutationAllowed(UserGroupInformation user,
+      QueueConfigsUpdateInfo confUpdate) {
+    Set<String> queues = new HashSet<>();
+    for (QueueConfigInfo addQueueInfo : confUpdate.getAddQueueInfo()) {
+      queues.add(addQueueInfo.getQueue());
+    }
+    for (String removeQueue : confUpdate.getRemoveQueueInfo()) {
+      queues.add(removeQueue);
+    }
+    for (QueueConfigInfo updateQueueInfo : confUpdate.getUpdateQueueInfo()) {
+      queues.add(updateQueueInfo.getQueue());
+    }
+    for (String queuePath : queues) {
+      String queueName = queuePath.lastIndexOf('.') != -1 ?
+          queuePath.substring(queuePath.lastIndexOf('.') + 1) : queuePath;
+      QueueInfo queueInfo = null;
+      try {
+        queueInfo = rmContext.getScheduler()
+            .getQueueInfo(queueName, false, false);
+      } catch (IOException e) {
+        // Queue is not found, do nothing.
+      }
+      String parentPath = queuePath;
+      // TODO: handle global config change.
+      while (queueInfo == null) {
+        // We are adding a queue (whose parent we are possibly also adding).
+        // Check ACL of lowest parent queue which already exists.
+        parentPath = parentPath.substring(0, parentPath.lastIndexOf('.'));
+        String parentName = parentPath.lastIndexOf('.') != -1 ?
+            parentPath.substring(parentPath.lastIndexOf('.') + 1) : parentPath;
+        try {
+          queueInfo = rmContext.getScheduler()
+              .getQueueInfo(parentName, false, false);
+        } catch (IOException e) {
+          // Queue is not found, do nothing.
+        }
+      }
+      Queue queue = ((MutableConfScheduler) rmContext.getScheduler())
+          .getQueue(queueInfo.getQueueName());
+      if (queue != null && !queue.hasAccess(QueueACL.ADMINISTER_QUEUE, user)) {
+        return false;
+      }
+    }
+    return true;
+  }
+}
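
A minimal sketch of wiring up and exercising the new policy, using the factory and configuration constant that the tests later in this commit rely on (rmContext, user, and params here are stand-ins the caller must supply):

    Configuration conf = new Configuration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
        QueueAdminConfigurationMutationACLPolicy.class,
        ConfigurationMutationACLPolicy.class);
    ConfigurationMutationACLPolicy policy =
        ConfigurationMutationACLPolicyFactory.getPolicy(conf);
    policy.init(conf, rmContext);  // rmContext supplies the scheduler used for queue lookups

    // Ask whether 'user' may update queue root.a with the given parameters.
    QueueConfigsUpdateInfo update = new QueueConfigsUpdateInfo();
    update.getUpdateQueueInfo().add(new QueueConfigInfo("root.a", params));
    boolean allowed = policy.isMutationAllowed(user, update);

Note that for an added queue whose path does not yet resolve to an existing queue, the policy walks up the path and checks ADMINISTER_QUEUE on the closest existing ancestor, as the while loop above shows.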

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 78eb4de..e084cca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -57,7 +57,6 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 
-import com.google.common.base.Joiner;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -143,7 +142,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
@@ -2643,10 +2641,8 @@ public class RMWebServices extends WebServices {
         callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
           @Override
           public Void run() throws IOException, YarnException {
-            Map<String, String> confUpdate =
-                constructKeyValueConfUpdate(mutationInfo);
-            ((CapacityScheduler) scheduler).updateConfiguration(callerUGI,
-                confUpdate);
+            ((MutableConfScheduler) scheduler).updateConfiguration(callerUGI,
+                mutationInfo);
             return null;
           }
         });
@@ -2658,129 +2654,9 @@ public class RMWebServices extends WebServices {
           "successfully applied.").build();
     } else {
       return Response.status(Status.BAD_REQUEST)
-          .entity("Configuration change only supported by CapacityScheduler.")
+          .entity("Configuration change only supported by " +
+              "MutableConfScheduler.")
           .build();
     }
   }
-
-  private Map<String, String> constructKeyValueConfUpdate(
-      QueueConfigsUpdateInfo mutationInfo) throws IOException {
-    CapacitySchedulerConfiguration currentConf =
-        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
-    CapacitySchedulerConfiguration proposedConf =
-        new CapacitySchedulerConfiguration(currentConf, false);
-    Map<String, String> confUpdate = new HashMap<>();
-    for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
-      removeQueue(queueToRemove, proposedConf, confUpdate);
-    }
-    for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
-      addQueue(addQueueInfo, proposedConf, confUpdate);
-    }
-    for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
-      updateQueue(updateQueueInfo, proposedConf, confUpdate);
-    }
-    return confUpdate;
-  }
-
-  private void removeQueue(
-      String queueToRemove, CapacitySchedulerConfiguration proposedConf,
-      Map<String, String> confUpdate) throws IOException {
-    if (queueToRemove == null) {
-      return;
-    } else {
-      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-      String queueName = queueToRemove.substring(
-          queueToRemove.lastIndexOf('.') + 1);
-      CSQueue queue = cs.getQueue(queueName);
-      if (queue == null ||
-          !queue.getQueuePath().equals(queueToRemove)) {
-        throw new IOException("Queue " + queueToRemove + " not found");
-      } else if (queueToRemove.lastIndexOf('.') == -1) {
-        throw new IOException("Can't remove queue " + queueToRemove);
-      }
-      String parentQueuePath = queueToRemove.substring(0, queueToRemove
-          .lastIndexOf('.'));
-      String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
-      List<String> newSiblingQueues = new ArrayList<>();
-      for (String siblingQueue : siblingQueues) {
-        if (!siblingQueue.equals(queueName)) {
-          newSiblingQueues.add(siblingQueue);
-        }
-      }
-      proposedConf.setQueues(parentQueuePath, newSiblingQueues
-          .toArray(new String[0]));
-      String queuesConfig = CapacitySchedulerConfiguration.PREFIX +
-          parentQueuePath + CapacitySchedulerConfiguration.DOT +
-          CapacitySchedulerConfiguration.QUEUES;
-      if (newSiblingQueues.size() == 0) {
-        confUpdate.put(queuesConfig, null);
-      } else {
-        confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
-      }
-      for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
-          ".*" + queueToRemove.replaceAll("\\.", "\\.") + "\\..*")
-          .entrySet()) {
-        proposedConf.unset(confRemove.getKey());
-        confUpdate.put(confRemove.getKey(), null);
-      }
-    }
-  }
-
-  private void addQueue(
-      QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
-      Map<String, String> confUpdate) throws IOException {
-    if (addInfo == null) {
-      return;
-    } else {
-      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-      String queuePath = addInfo.getQueue();
-      String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
-      if (cs.getQueue(queueName) != null) {
-        throw new IOException("Can't add existing queue " + queuePath);
-      } else if (queuePath.lastIndexOf('.') == -1) {
-        throw new IOException("Can't add invalid queue " + queuePath);
-      }
-      String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
-      String[] siblings = proposedConf.getQueues(parentQueue);
-      List<String> siblingQueues = siblings == null ? new ArrayList<>() :
-          new ArrayList<>(Arrays.<String>asList(siblings));
-      siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
-      proposedConf.setQueues(parentQueue,
-          siblingQueues.toArray(new String[0]));
-      confUpdate.put(CapacitySchedulerConfiguration.PREFIX +
-          parentQueue + CapacitySchedulerConfiguration.DOT +
-          CapacitySchedulerConfiguration.QUEUES,
-          Joiner.on(',').join(siblingQueues));
-      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
-          queuePath + CapacitySchedulerConfiguration.DOT;
-      for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
-        if (kv.getValue() == null) {
-          proposedConf.unset(keyPrefix + kv.getKey());
-        } else {
-          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
-        }
-        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
-      }
-    }
-  }
-
-  private void updateQueue(QueueConfigInfo updateInfo,
-      CapacitySchedulerConfiguration proposedConf,
-      Map<String, String> confUpdate) {
-    if (updateInfo == null) {
-      return;
-    } else {
-      String queuePath = updateInfo.getQueue();
-      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
-          queuePath + CapacitySchedulerConfiguration.DOT;
-      for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
-        if (kv.getValue() == null) {
-          proposedConf.unset(keyPrefix + kv.getKey());
-        } else {
-          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
-        }
-        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
-      }
-    }
-  }
 }
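
The queue-path-to-property mapping that used to live in the removed updateQueue() above now happens behind MutableConfScheduler.updateConfiguration(). As an illustration only (the helper name is hypothetical; the constants are the ones used in the removed code), each parameter is namespaced under the queue's capacity-scheduler prefix:

    private static Map<String, String> toKeyValueUpdate(QueueConfigInfo updateInfo) {
      // "yarn.scheduler.capacity." + <queue path> + "."
      String keyPrefix = CapacitySchedulerConfiguration.PREFIX
          + updateInfo.getQueue() + CapacitySchedulerConfiguration.DOT;
      Map<String, String> confUpdate = new HashMap<>();
      for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue()); // a null value clears the key
      }
      return confUpdate;
    }

So updating root.a with goodKey=goodVal yields yarn.scheduler.capacity.root.a.goodKey=goodVal, which is exactly what the provider test changes below assert.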

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
new file mode 100644
index 0000000..4016dcf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestConfigurationMutationACLPolicies {
+
+  private ConfigurationMutationACLPolicy policy;
+  private RMContext rmContext;
+  private MutableConfScheduler scheduler;
+
+  private static final UserGroupInformation GOOD_USER = UserGroupInformation
+      .createUserForTesting("goodUser", new String[] {});
+  private static final UserGroupInformation BAD_USER = UserGroupInformation
+      .createUserForTesting("badUser", new String[] {});
+  private static final Map<String, String> EMPTY_MAP =
+      Collections.<String, String>emptyMap();
+
+  @Before
+  public void setUp() throws IOException {
+    rmContext = mock(RMContext.class);
+    scheduler = mock(MutableConfScheduler.class);
+    when(rmContext.getScheduler()).thenReturn(scheduler);
+    mockQueue("a", scheduler);
+    mockQueue("b", scheduler);
+    mockQueue("b1", scheduler);
+  }
+
+  private void mockQueue(String queueName, MutableConfScheduler scheduler)
+      throws IOException {
+    QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null,
+        null, null, null, null, false);
+    when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean()))
+        .thenReturn(queueInfo);
+    Queue queue = mock(Queue.class);
+    when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(GOOD_USER)))
+        .thenReturn(true);
+    when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(BAD_USER)))
+        .thenReturn(false);
+    when(scheduler.getQueue(eq(queueName))).thenReturn(queue);
+  }
+  @Test
+  public void testDefaultPolicy() {
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.YARN_ADMIN_ACL, GOOD_USER.getShortUserName());
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        DefaultConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, null));
+    assertFalse(policy.isMutationAllowed(BAD_USER, null));
+  }
+
+  @Test
+  public void testQueueAdminBasedPolicy() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.a", EMPTY_MAP);
+    updateInfo.getUpdateQueueInfo().add(configInfo);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+
+  @Test
+  public void testQueueAdminPolicyAddQueue() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    // Add root.b.b2. Should check ACL of root.b queue.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2", EMPTY_MAP);
+    updateInfo.getAddQueueInfo().add(configInfo);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+
+  @Test
+  public void testQueueAdminPolicyAddNestedQueue() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    // Add root.b.b2.b21. Should check ACL of root.b queue.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2.b21", EMPTY_MAP);
+    updateInfo.getAddQueueInfo().add(configInfo);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+
+  @Test
+  public void testQueueAdminPolicyRemoveQueue() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    // Remove root.b.b1.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.b.b1");
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac788fd4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 254da31..13229b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -19,8 +19,12 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -43,22 +47,34 @@ public class TestMutableCSConfigurationProvider {
 
   private MutableCSConfigurationProvider confProvider;
   private RMContext rmContext;
-  private Map<String, String> goodUpdate;
-  private Map<String, String> badUpdate;
+  private QueueConfigsUpdateInfo goodUpdate;
+  private QueueConfigsUpdateInfo badUpdate;
   private CapacityScheduler cs;
 
-  private static final String TEST_USER = "testUser";
+  private static final UserGroupInformation TEST_USER = UserGroupInformation
+      .createUserForTesting("testUser", new String[] {});
 
   @Before
   public void setUp() {
     cs = mock(CapacityScheduler.class);
     rmContext = mock(RMContext.class);
     when(rmContext.getScheduler()).thenReturn(cs);
+    when(cs.getConfiguration()).thenReturn(
+        new CapacitySchedulerConfiguration());
     confProvider = new MutableCSConfigurationProvider(rmContext);
-    goodUpdate = new HashMap<>();
-    goodUpdate.put("goodKey", "goodVal");
-    badUpdate = new HashMap<>();
-    badUpdate.put("badKey", "badVal");
+    goodUpdate = new QueueConfigsUpdateInfo();
+    Map<String, String> goodUpdateMap = new HashMap<>();
+    goodUpdateMap.put("goodKey", "goodVal");
+    QueueConfigInfo goodUpdateInfo = new
+        QueueConfigInfo("root.a", goodUpdateMap);
+    goodUpdate.getUpdateQueueInfo().add(goodUpdateInfo);
+
+    badUpdate = new QueueConfigsUpdateInfo();
+    Map<String, String> badUpdateMap = new HashMap<>();
+    badUpdateMap.put("badKey", "badVal");
+    QueueConfigInfo badUpdateInfo = new
+        QueueConfigInfo("root.a", badUpdateMap);
+    badUpdate.getUpdateQueueInfo().add(badUpdateInfo);
   }
 
   @Test
@@ -66,15 +82,16 @@ public class TestMutableCSConfigurationProvider {
     Configuration conf = new Configuration();
     confProvider.init(conf);
     assertNull(confProvider.loadConfiguration(conf)
-        .get("goodKey"));
+        .get("yarn.scheduler.capacity.root.a.goodKey"));
 
     doNothing().when(cs).reinitialize(any(Configuration.class),
         any(RMContext.class));
     confProvider.mutateConfiguration(TEST_USER, goodUpdate);
     assertEquals("goodVal", confProvider.loadConfiguration(conf)
-        .get("goodKey"));
+        .get("yarn.scheduler.capacity.root.a.goodKey"));
 
-    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+    assertNull(confProvider.loadConfiguration(conf).get(
+        "yarn.scheduler.capacity.root.a.badKey"));
     doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
         any(RMContext.class));
     try {
@@ -82,6 +99,7 @@ public class TestMutableCSConfigurationProvider {
     } catch (IOException e) {
       // Expected exception.
     }
-    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+    assertNull(confProvider.loadConfiguration(conf).get(
+        "yarn.scheduler.capacity.root.a.badKey"));
   }
 }




[05/50] [abbrv] hadoop git commit: Update maven version for 3.0.0-alpha4 development

Posted by xg...@apache.org.
Update maven version for 3.0.0-alpha4 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16ad896d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16ad896d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16ad896d

Branch: refs/heads/YARN-5734
Commit: 16ad896d5cb8ab21e9cb2763df7c15cfcc0a6ede
Parents: 303c8dc
Author: Andrew Wang <wa...@apache.org>
Authored: Fri May 26 14:09:44 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri May 26 14:09:44 2017 -0700

----------------------------------------------------------------------
 hadoop-assemblies/pom.xml                                        | 4 ++--
 hadoop-build-tools/pom.xml                                       | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml                  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml     | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml                  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml    | 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml          | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml              | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml                      | 4 ++--
 hadoop-client-modules/pom.xml                                    | 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml        | 4 ++--
 hadoop-cloud-storage-project/pom.xml                             | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml                 | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml               | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml                        | 4 ++--
 hadoop-common-project/hadoop-common/pom.xml                      | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml                         | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml                     | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml                         | 4 ++--
 hadoop-common-project/pom.xml                                    | 4 ++--
 hadoop-dist/pom.xml                                              | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml                   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml                   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml            | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml                      | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml                          | 4 ++--
 hadoop-hdfs-project/pom.xml                                      | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml                       | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml                   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml                    | 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml                   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml                      | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml         | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml       | 4 ++--
 hadoop-mapreduce-project/pom.xml                                 | 4 ++--
 hadoop-maven-plugins/pom.xml                                     | 2 +-
 hadoop-minicluster/pom.xml                                       | 4 ++--
 hadoop-project-dist/pom.xml                                      | 4 ++--
 hadoop-project/pom.xml                                           | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml                               | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml                         | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml                             | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml                                  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml                       | 2 +-
 hadoop-tools/hadoop-azure/pom.xml                                | 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml                             | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml                               | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml                               | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml                              | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml                            | 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-sls/pom.xml                                  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml                            | 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml                           | 4 ++--
 hadoop-tools/pom.xml                                             | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml          | 4 ++--
 .../hadoop-yarn-applications-distributedshell/pom.xml            | 4 ++--
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml     | 4 ++--
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml         | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml         | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml    | 4 ++--
 .../hadoop-yarn-server-resourcemanager/pom.xml                   | 4 ++--
 .../hadoop-yarn-server-sharedcachemanager/pom.xml                | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml          | 4 ++--
 .../hadoop-yarn-server-timeline-pluginstorage/pom.xml            | 4 ++--
 .../hadoop-yarn-server-timelineservice-hbase-tests/pom.xml       | 4 ++--
 .../hadoop-yarn-server-timelineservice-hbase/pom.xml             | 2 +-
 .../hadoop-yarn-server-timelineservice/pom.xml                   | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml      | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml         | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml           | 4 ++--
 hadoop-yarn-project/hadoop-yarn/pom.xml                          | 4 ++--
 hadoop-yarn-project/pom.xml                                      | 4 ++--
 pom.xml                                                          | 2 +-
 84 files changed, 160 insertions(+), 160 deletions(-)
----------------------------------------------------------------------
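
Bulk version bumps like this are typically produced mechanically, for example with the Maven Versions plugin (mvn versions:set -DnewVersion=3.0.0-alpha4-SNAPSHOT); the exact command used for this commit is not recorded, so that invocation is illustrative only.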


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-assemblies/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml
index 014808f..5d0dc12 100644
--- a/hadoop-assemblies/pom.xml
+++ b/hadoop-assemblies/pom.xml
@@ -23,11 +23,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-build-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml
index c645d0e..9de3e50 100644
--- a/hadoop-build-tools/pom.xml
+++ b/hadoop-build-tools/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/hadoop-client-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index 76610cd..135ffb8 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.0.0-alpha3-SNAPSHOT</version>
+   <version>3.0.0-alpha4-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-api</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index 44c82f4..4adbc92 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-invariants</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>Enforces our invariants for the api and runtime client modules.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index a2ba0bf..0509967 100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-test-invariants</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>Enforces our invariants for the testing client modules.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
index 99fd0c2..eaecbc0 100644
--- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
+++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-integration-tests</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
 
   <description>Checks that we can use the generated artifacts</description>
   <name>Apache Hadoop Client Packaging Integration Tests</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 01fc0ef..ac95bf5 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-minicluster</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Minicluster for Clients</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 151191c..dc0f005 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.0.0-alpha3-SNAPSHOT</version>
+   <version>3.0.0-alpha4-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-runtime</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index 6bc7232..9ecfd78 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.0.0-alpha3-SNAPSHOT</version>
+   <version>3.0.0-alpha4-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
 </parent>
   <artifactId>hadoop-client</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
 
   <description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
   <name>Apache Hadoop Client Aggregator</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-client-modules/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/pom.xml b/hadoop-client-modules/pom.xml
index fe6e68d..8a90a58 100644
--- a/hadoop-client-modules/pom.xml
+++ b/hadoop-client-modules/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-modules</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 3211474..59ef3ca 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Cloud Storage</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-cloud-storage-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml
index 09b60fc..73baa47 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage-project</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Cloud Storage Project</description>
   <name>Apache Hadoop Cloud Storage Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/hadoop-annotations/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index 56e7610..4c01494 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/hadoop-auth-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index 4904bd2..a895bad 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/hadoop-auth/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index b4beb82..f608daa 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index e8b5317..7ef0462 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-common</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/hadoop-kms/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index b55dd08..d84efa4 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kms</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop KMS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/hadoop-minikdc/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml
index f4baef1..d9e01ec 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-minikdc</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop MiniKDC</description>
   <name>Apache Hadoop MiniKDC</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/hadoop-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index 5d35730..9340128 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-nfs</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop NFS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-common-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index 803936a..63abb60 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-common-project</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 875cfe2..0f90069 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-dist</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 9fd7bb4..5a82f42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-client</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Client</description>
   <name>Apache Hadoop HDFS Client</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index d0a01cb..64f08ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop HttpFS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 8e6cc9c..d139f7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-native-client</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Native Client</description>
   <name>Apache Hadoop HDFS Native Client</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
index 6541669..0268067 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop HDFS-NFS</description>
   <name>Apache Hadoop HDFS-NFS</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 6e1a318..3deea50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop HDFS</description>
   <name>Apache Hadoop HDFS</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-hdfs-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index 5a718b4..d18cc7c 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index a730fb5..dcf93bf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-app</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce App</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index e9f03ed..60f213c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-common</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index c51072d..dc1fee8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-core</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Core</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
index 7926081..9210596 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs-plugins</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer Plugins</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
index adbab5a..0b026ed 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 1747f59..527e061 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce JobClient</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
index 1045000..721e28a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-nativetask</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce NativeTask</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 260647e..60adf00 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Shuffle</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index 1d692a5..0f453a8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-client</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Client</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 0736eb6..948ebbc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-examples</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Examples</description>
   <name>Apache Hadoop MapReduce Examples</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-mapreduce-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index 52c9c07..060fe4b 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop MapReduce</name>
   <url>http://hadoop.apache.org/mapreduce/</url>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-maven-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index 8bad205..b10ee1e 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-maven-plugins</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml
index 3f848c9..6c39f6c 100644
--- a/hadoop-minicluster/pom.xml
+++ b/hadoop-minicluster/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-minicluster</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Mini-Cluster</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 8f386e7..a4abbd6 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-project-dist</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Project Dist POM</description>
   <name>Apache Hadoop Project Dist POM</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 5e1da34..3cfdc18 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -20,10 +20,10 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-main</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-project</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Project POM</description>
   <name>Apache Hadoop Project POM</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-aliyun/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml
index 74461fe..e48cbf4 100644
--- a/hadoop-tools/hadoop-aliyun/pom.xml
+++ b/hadoop-tools/hadoop-aliyun/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aliyun</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-archive-logs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml
index b45ad98..a6ea650 100644
--- a/hadoop-tools/hadoop-archive-logs/pom.xml
+++ b/hadoop-tools/hadoop-archive-logs/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archive-logs</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Archive Logs</description>
   <name>Apache Hadoop Archive Logs</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-archives/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml
index 257be77..7d7131a 100644
--- a/hadoop-tools/hadoop-archives/pom.xml
+++ b/hadoop-tools/hadoop-archives/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archives</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Archives</description>
   <name>Apache Hadoop Archives</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index e00ae10..ca3c355 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aws</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop Amazon Web Services support</name>
   <description>
     This module contains code to support integration with Amazon Web Services.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-azure-datalake/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 6e1306b..c6b6350 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-azure-datalake</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 0f59c46..37c37a3 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-azure</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-datajoin/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml
index 913d9d0..c8a91ed 100644
--- a/hadoop-tools/hadoop-datajoin/pom.xml
+++ b/hadoop-tools/hadoop-datajoin/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-datajoin</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Data Join</description>
   <name>Apache Hadoop Data Join</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-distcp/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index 81a1595..bccaac1 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-distcp</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Distributed Copy</description>
   <name>Apache Hadoop Distributed Copy</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-extras/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml
index c8ec9ce..d08ad39 100644
--- a/hadoop-tools/hadoop-extras/pom.xml
+++ b/hadoop-tools/hadoop-extras/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-extras</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Extras</description>
   <name>Apache Hadoop Extras</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-gridmix/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml
index d6b39f3..45ee039 100644
--- a/hadoop-tools/hadoop-gridmix/pom.xml
+++ b/hadoop-tools/hadoop-gridmix/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-gridmix</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Gridmix</description>
   <name>Apache Hadoop Gridmix</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml
index b9273fe..8f41de1 100644
--- a/hadoop-tools/hadoop-kafka/pom.xml
+++ b/hadoop-tools/hadoop-kafka/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kafka</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop Kafka Library support</name>
   <description>
     This module contains code to support integration with Kafka.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-openstack/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index f80fcdf..5169ec5 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-openstack</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop OpenStack support</name>
   <description>
     This module contains code to support integration with OpenStack.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-pipes/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-pipes/pom.xml b/hadoop-tools/hadoop-pipes/pom.xml
index 15834ed..1061d9c 100644
--- a/hadoop-tools/hadoop-pipes/pom.xml
+++ b/hadoop-tools/hadoop-pipes/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-pipes</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Pipes</description>
   <name>Apache Hadoop Pipes</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-rumen/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml
index 6d89466..99e4367 100644
--- a/hadoop-tools/hadoop-rumen/pom.xml
+++ b/hadoop-tools/hadoop-rumen/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-rumen</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Rumen</description>
   <name>Apache Hadoop Rumen</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-sls/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index 8fb57f3..d70021c 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-sls</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Scheduler Load Simulator</description>
   <name>Apache Hadoop Scheduler Load Simulator</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-streaming/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml
index 9ef9664..1eb4eae 100644
--- a/hadoop-tools/hadoop-streaming/pom.xml
+++ b/hadoop-tools/hadoop-streaming/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-streaming</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Streaming</description>
   <name>Apache Hadoop MapReduce Streaming</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index dd28404..8204123 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-tools-dist</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Tools Dist</description>
   <name>Apache Hadoop Tools Dist</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index 056205c..55ee414 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-tools</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Tools</description>
   <name>Apache Hadoop Tools</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 55de5d5..33f16f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-api</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN API</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index b3db4e7..9aa8951 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN DistributedShell</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
index 66a6ca1..99edc0a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications-unmanaged-am-launcher</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Unmanaged Am Launcher</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index f85a846..d6090be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Applications</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index 74a3998..2501109 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -17,10 +17,10 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-client</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Client</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 5707444..f9cfb11 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-common</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 25bfa2e..09cf9c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-registry</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Registry</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index b13271d..641d5fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN ApplicationHistoryService</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index fc23af8..465f71a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-common</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 1445d2a..75ec0a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-nodemanager</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN NodeManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 8505df8..c40dfa3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN ResourceManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
index 4d30638..fb59131 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
@@ -17,10 +17,10 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-server-sharedcachemanager</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN SharedCacheManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index 66d9270..e57442c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -19,10 +19,10 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-server-tests</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server Tests</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
index a3a44f8..f536df6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timeline-pluginstorage</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Plugin Storage</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index afe440f..bacd017 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase-tests</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Service HBase tests</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
index 6c153ad..a2a5c2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index 3b1a856..a205e65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Service</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
index 75e49a5..34ffb08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Web Proxy</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index 384ced4..a0b0082 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
index 0820945..d4fcf90 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-site</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN Site</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index bec99cc..b920a32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-ui</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN UI</name>
   <packaging>${packaging.type}</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/hadoop-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index c43588a..725b133 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -16,11 +16,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-yarn</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop YARN</name>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/hadoop-yarn-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index a64f25a..2e3c095 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-yarn-project</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop YARN Project</name>
   <url>http://hadoop.apache.org/yarn/</url>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ad896d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f07256d..17458bc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-main</artifactId>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <description>Apache Hadoop Main</description>
   <name>Apache Hadoop Main</name>
   <packaging>pom</packaging>


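This bump is purely mechanical: every module POM moves from 3.0.0-alpha3-SNAPSHOT to 3.0.0-alpha4-SNAPSHOT, both in the <parent> block and in the module's own <version> element. A change of this shape is normally generated rather than hand-edited; a minimal sketch with the stock versions-maven-plugin (the plugin invocation is an assumption, not something recorded in this commit):

    # from the hadoop-main root: rewrite <version> across all reactor modules
    mvn versions:set -DnewVersion=3.0.0-alpha4-SNAPSHOT
    # keep the result and delete the pom.xml.versionsBackup files it left behind
    mvn versions:commit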

[35/50] [abbrv] hadoop git commit: HDFS-11359. DFSAdmin report command supports displaying maintenance state datanodes. Contributed by Yiqun Lin.

Posted by xg...@apache.org.
HDFS-11359. DFSAdmin report command supports displaying maintenance state datanodes. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d9084eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d9084eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d9084eb

Branch: refs/heads/YARN-5734
Commit: 8d9084eb62f4593d4dfeb618abacf6ae89019109
Parents: d95c82c
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri Jun 2 12:48:30 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri Jun 2 12:48:30 2017 +0800

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/HdfsConstants.java     |  2 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  8 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  2 +
 .../server/blockmanagement/DatanodeManager.java |  7 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 88 +++++++++---------
 .../src/site/markdown/HDFSCommands.md           |  4 +-
 .../hadoop/hdfs/TestMaintenanceState.java       | 94 ++++++++++++++++++++
 .../src/test/resources/testHDFSConf.xml         |  2 +-
 8 files changed, 161 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
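
Per the diffstat, the user-visible half of this change lives in DFSAdmin.java and HDFSCommands.md: the -report subcommand gains filters for the two maintenance states alongside the existing live/dead/decommissioning ones. A usage sketch (flag spellings inferred from the report types added below, not quoted from the patch):

    hdfs dfsadmin -report -enteringmaintenance   # nodes still draining into maintenance
    hdfs dfsadmin -report -inmaintenance         # nodes fully in maintenance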


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 0d31bc4..b636121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -141,7 +141,7 @@ public final class HdfsConstants {
 
   // type of the datanode report
   public enum DatanodeReportType {
-    ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
+    ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE, IN_MAINTENANCE
   }
 
   /* Hidden constructor */
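
With the new IN_MAINTENANCE constant, programmatic clients can request the same report that dfsadmin prints. A minimal sketch against the public DistributedFileSystem API (it assumes fs.defaultFS points at a running NameNode; this is illustrative, not code from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class ListMaintenanceNodes {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // picks up core-site.xml/hdfs-site.xml
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) FileSystem.get(conf)) {
          // One RPC; the NameNode filters the datanode list server-side.
          for (DatanodeInfo dn :
                   dfs.getDataNodeStats(DatanodeReportType.IN_MAINTENANCE)) {
            System.out.println(dn.getHostName());
          }
        }
      }
    }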

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 141a05a..1c2c83f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -1723,6 +1723,10 @@ public class PBHelperClient {
     case LIVE: return DatanodeReportTypeProto.LIVE;
     case DEAD: return DatanodeReportTypeProto.DEAD;
     case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
+    case ENTERING_MAINTENANCE:
+      return DatanodeReportTypeProto.ENTERING_MAINTENANCE;
+    case IN_MAINTENANCE:
+      return DatanodeReportTypeProto.IN_MAINTENANCE;
     default:
       throw new IllegalArgumentException("Unexpected data type report:" + t);
     }
@@ -2126,6 +2130,10 @@ public class PBHelperClient {
     case LIVE: return DatanodeReportType.LIVE;
     case DEAD: return DatanodeReportType.DEAD;
     case DECOMMISSIONING: return DatanodeReportType.DECOMMISSIONING;
+    case ENTERING_MAINTENANCE:
+      return DatanodeReportType.ENTERING_MAINTENANCE;
+    case IN_MAINTENANCE:
+      return DatanodeReportType.IN_MAINTENANCE;
     default:
       throw new IllegalArgumentException("Unexpected data type report:" + t);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index a4b6f0b..fb42271 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -332,6 +332,8 @@ enum DatanodeReportTypeProto {  // type of the datanode report
   LIVE = 2;
   DEAD = 3;
   DECOMMISSIONING = 4;
+  ENTERING_MAINTENANCE = 5;
+  IN_MAINTENANCE = 6;
 }
 
 message GetDatanodeReportRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index c303594..a786c6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1441,6 +1441,9 @@ public class DatanodeManager {
     final boolean listEnteringMaintenanceNodes =
         type == DatanodeReportType.ALL ||
         type == DatanodeReportType.ENTERING_MAINTENANCE;
+    final boolean listInMaintenanceNodes =
+        type == DatanodeReportType.ALL ||
+        type == DatanodeReportType.IN_MAINTENANCE;
 
     ArrayList<DatanodeDescriptor> nodes;
     final HostSet foundNodes = new HostSet();
@@ -1453,11 +1456,13 @@ public class DatanodeManager {
         final boolean isDead = isDatanodeDead(dn);
         final boolean isDecommissioning = dn.isDecommissionInProgress();
         final boolean isEnteringMaintenance = dn.isEnteringMaintenance();
+        final boolean isInMaintenance = dn.isInMaintenance();
 
         if (((listLiveNodes && !isDead) ||
             (listDeadNodes && isDead) ||
             (listDecommissioningNodes && isDecommissioning) ||
-            (listEnteringMaintenanceNodes && isEnteringMaintenance)) &&
+            (listEnteringMaintenanceNodes && isEnteringMaintenance) ||
+            (listInMaintenanceNodes && isInMaintenance)) &&
             hostConfigManager.isIncluded(dn)) {
           nodes.add(dn);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 5375cd9..e59ea37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -421,7 +421,8 @@ public class DFSAdmin extends FsShell {
    * "hdfs dfsadmin"
    */
   private static final String commonUsageSummary =
-    "\t[-report [-live] [-dead] [-decommissioning]]\n" +
+    "\t[-report [-live] [-dead] [-decommissioning] " +
+    "[-enteringmaintenance] [-inmaintenance]]\n" +
     "\t[-safemode <enter | leave | get | wait>]\n" +
     "\t[-saveNamespace [-beforeShutdown]]\n" +
     "\t[-rollEdits]\n" +
@@ -544,48 +545,51 @@ public class DFSAdmin extends FsShell {
     final boolean listDead = StringUtils.popOption("-dead", args);
     final boolean listDecommissioning =
         StringUtils.popOption("-decommissioning", args);
+    final boolean listEnteringMaintenance =
+        StringUtils.popOption("-enteringmaintenance", args);
+    final boolean listInMaintenance =
+        StringUtils.popOption("-inmaintenance", args);
+
 
     // If no filter flags are found, then list all DN types
-    boolean listAll = (!listLive && !listDead && !listDecommissioning);
+    boolean listAll = (!listLive && !listDead && !listDecommissioning
+        && !listEnteringMaintenance && !listInMaintenance);
 
     if (listAll || listLive) {
-      DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
-      if (live.length > 0 || listLive) {
-        System.out.println("Live datanodes (" + live.length + "):\n");
-      }
-      if (live.length > 0) {
-        for (DatanodeInfo dn : live) {
-          System.out.println(dn.getDatanodeReport());
-          System.out.println();
-        }
-      }
+      printDataNodeReports(dfs, DatanodeReportType.LIVE, listLive, "Live");
     }
 
     if (listAll || listDead) {
-      DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
-      if (dead.length > 0 || listDead) {
-        System.out.println("Dead datanodes (" + dead.length + "):\n");
-      }
-      if (dead.length > 0) {
-        for (DatanodeInfo dn : dead) {
-          System.out.println(dn.getDatanodeReport());
-          System.out.println();
-        }
-      }
+      printDataNodeReports(dfs, DatanodeReportType.DEAD, listDead, "Dead");
     }
 
     if (listAll || listDecommissioning) {
-      DatanodeInfo[] decom =
-          dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING);
-      if (decom.length > 0 || listDecommissioning) {
-        System.out.println("Decommissioning datanodes (" + decom.length
-            + "):\n");
-      }
-      if (decom.length > 0) {
-        for (DatanodeInfo dn : decom) {
-          System.out.println(dn.getDatanodeReport());
-          System.out.println();
-        }
+      printDataNodeReports(dfs, DatanodeReportType.DECOMMISSIONING,
+          listDecommissioning, "Decommissioning");
+    }
+
+    if (listAll || listEnteringMaintenance) {
+      printDataNodeReports(dfs, DatanodeReportType.ENTERING_MAINTENANCE,
+          listEnteringMaintenance, "Entering maintenance");
+    }
+
+    if (listAll || listInMaintenance) {
+      printDataNodeReports(dfs, DatanodeReportType.IN_MAINTENANCE,
+          listInMaintenance, "In maintenance");
+    }
+  }
+
+  private static void printDataNodeReports(DistributedFileSystem dfs,
+      DatanodeReportType type, boolean listNodes, String nodeState)
+      throws IOException {
+    DatanodeInfo[] nodes = dfs.getDataNodeStats(type);
+    if (nodes.length > 0 || listNodes) {
+      System.out.println(nodeState + " datanodes (" + nodes.length + "):\n");
+    }
+    if (nodes.length > 0) {
+      for (DatanodeInfo dn : nodes) {
+        System.out.println(dn.getDatanodeReport());
+        System.out.println();
       }
     }
   }
@@ -986,12 +990,13 @@ public class DFSAdmin extends FsShell {
       "hdfs dfsadmin\n" +
       commonUsageSummary;
 
-    String report ="-report [-live] [-dead] [-decommissioning]:\n" +
-      "\tReports basic filesystem information and statistics. \n" +
-      "\tThe dfs usage can be different from \"du\" usage, because it\n" +
-      "\tmeasures raw space used by replication, checksums, snapshots\n" +
-      "\tand etc. on all the DNs.\n" +
-      "\tOptional flags may be used to filter the list of displayed DNs.\n";
+    String report = "-report [-live] [-dead] [-decommissioning] "
+        + "[-enteringmaintenance] [-inmaintenance]:\n" +
+        "\tReports basic filesystem information and statistics.\n" +
+        "\tThe dfs usage can be different from \"du\" usage, because it\n" +
+        "\tmeasures raw space used by replication, checksums, snapshots,\n" +
+        "\tetc. on all the DNs.\n" +
+        "\tOptional flags may be used to filter the list of displayed DNs.\n";
 
     String safemode = "-safemode <enter|leave|get|wait|forceExit>:  Safe mode " +
         "maintenance command.\n" +
@@ -1779,7 +1784,8 @@ public class DFSAdmin extends FsShell {
   private static void printUsage(String cmd) {
     if ("-report".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-report] [-live] [-dead] [-decommissioning]");
+          + " [-report] [-live] [-dead] [-decommissioning]"
+          + " [-enteringmaintenance] [-inmaintenance]");
     } else if ("-safemode".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-safemode enter | leave | get | wait | forceExit]");
@@ -1917,7 +1923,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-report".equals(cmd)) {
-      if (argv.length > 4) {
+      if (argv.length > 6) {
         printUsage(cmd);
         return exitCode;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index b8d1362..6765a8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -338,7 +338,7 @@ Runs a HDFS datanode.
 
 Usage:
 
-        hdfs dfsadmin [-report [-live] [-dead] [-decommissioning]]
+        hdfs dfsadmin [-report [-live] [-dead] [-decommissioning] [-enteringmaintenance] [-inmaintenance]]
         hdfs dfsadmin [-safemode enter | leave | get | wait | forceExit]
         hdfs dfsadmin [-saveNamespace [-beforeShutdown]]
         hdfs dfsadmin [-rollEdits]
@@ -374,7 +374,7 @@ Usage:
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| `-report` `[-live]` `[-dead]` `[-decommissioning]` | Reports basic filesystem information and statistics, The dfs usage can be different from "du" usage, because it measures raw space used by replication, checksums, snapshots and etc. on all the DNs. Optional flags may be used to filter the list of displayed DataNodes. |
+| `-report` `[-live]` `[-dead]` `[-decommissioning]` `[-enteringmaintenance]` `[-inmaintenance]` | Reports basic filesystem information and statistics. The dfs usage can be different from "du" usage, because it measures raw space used by replication, checksums, snapshots, etc. on all the DNs. Optional flags may be used to filter the list of displayed DataNodes. |
| `-safemode` enter\|leave\|get\|wait\|forceExit | Safe mode maintenance command. Safe mode is a Namenode state in which it <br/>1. does not accept changes to the name space (read-only) <br/>2. does not replicate or delete blocks. <br/>Safe mode is entered automatically at Namenode startup, and leaves safe mode automatically when the configured minimum percentage of blocks satisfies the minimum replication condition. If Namenode detects any anomaly then it will linger in safe mode till that issue is resolved. If that anomaly is the consequence of a deliberate action, then administrator can use -safemode forceExit to exit safe mode. The cases where forceExit may be required are<br/> 1. Namenode metadata is not consistent. If Namenode detects that metadata has been modified out of band and can cause data loss, then Namenode will enter forceExit state. At that point user can either restart Namenode with correct metadata files or forceExit (if data loss is acceptable).<br/>2. Rollback causes metadata to be replaced and rarely it can trigger safe mode forceExit state in Namenode. In that case you may proceed by issuing -safemode forceExit.<br/> Safe mode can also be entered manually, but then it can only be turned off manually as well. |
 | `-saveNamespace` `[-beforeShutdown]` | Save current namespace into storage directories and reset edits log. Requires safe mode. If the "beforeShutdown" option is given, the NameNode does a checkpoint if and only if no checkpoint has been done during a time window (a configurable number of checkpoint periods). This is usually used before shutting down the NameNode to prevent potential fsimage/editlog corruption. |
 | `-rollEdits` | Rolls the edit log on the active NameNode. |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index b49fba0..e0dfb4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -17,11 +17,18 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -29,6 +36,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,8 +51,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -1124,4 +1134,88 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
       return null;
     }
   }
+
+  @Test(timeout = 120000)
+  public void testReportMaintenanceNodes() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    ByteArrayOutputStream err = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(out));
+    System.setErr(new PrintStream(err));
+
+    LOG.info("Starting testReportMaintenanceNodes");
+    int expirationInMs = 30 * 1000;
+    int numNodes = 2;
+    setMinMaintenanceR(numNodes);
+
+    startCluster(1, numNodes);
+    getCluster().waitActive();
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        fileSys.getUri().toString());
+    DFSAdmin dfsAdmin = new DFSAdmin(getConf());
+
+    FSNamesystem fsn = getCluster().getNameNode().getNamesystem();
+    assertEquals(numNodes, fsn.getNumLiveDataNodes());
+
+    int ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-report", "-enteringmaintenance", "-inmaintenance"});
+    assertEquals(0, ret);
+    assertThat(out.toString(),
+        is(allOf(containsString("Entering maintenance datanodes (0):"),
+            containsString("In maintenance datanodes (0):"),
+            not(containsString(
+                getCluster().getDataNodes().get(0).getDisplayName())),
+            not(containsString(
+                getCluster().getDataNodes().get(1).getDisplayName())))));
+
+    final Path file = new Path("/testReportMaintenanceNodes.dat");
+    writeFile(fileSys, file, numNodes, 1);
+
+    DatanodeInfo[] nodes = getFirstBlockReplicasDatanodeInfos(fileSys, file);
+    // Request maintenance for the first DataNode. It will not transition to
+    // AdminStates.IN_MAINTENANCE immediately, since there are not enough
+    // candidate nodes to satisfy the minimum maintenance replication.
+    DatanodeInfo maintenanceDN = takeNodeOutofService(0,
+        nodes[0].getDatanodeUuid(), Time.now() + expirationInMs, null, null,
+        AdminStates.ENTERING_MAINTENANCE);
+    assertEquals(1, fsn.getNumEnteringMaintenanceDataNodes());
+
+    // reset stream
+    out.reset();
+    err.reset();
+
+    ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-report", "-enteringmaintenance"});
+    assertEquals(0, ret);
+    assertThat(out.toString(),
+        is(allOf(containsString("Entering maintenance datanodes (1):"),
+            containsString(nodes[0].getXferAddr()),
+            not(containsString(nodes[1].getXferAddr())))));
+
+    // reset stream
+    out.reset();
+    err.reset();
+
+    // Start a new datanode so the entering-maintenance node can transition
+    // to AdminStates.IN_MAINTENANCE.
+    getCluster().startDataNodes(getConf(), 1, true, null, null);
+    getCluster().waitActive();
+
+    waitNodeState(maintenanceDN, AdminStates.IN_MAINTENANCE);
+    assertEquals(1, fsn.getNumInMaintenanceLiveDataNodes());
+
+    ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-report", "-inmaintenance"});
+    assertEquals(0, ret);
+    assertThat(out.toString(),
+        is(allOf(containsString("In maintenance datanodes (1):"),
+            containsString(nodes[0].getXferAddr()),
+            not(containsString(nodes[1].getXferAddr())),
+            not(containsString(
+                getCluster().getDataNodes().get(2).getDisplayName())))));
+
+    cleanupFile(getCluster().getFileSystem(), file);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9084eb/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 88518bf..9302507 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -15665,7 +15665,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-report \[-live\] \[-dead\] \[-decommissioning\]:(.)*</expected-output>
+          <expected-output>^-report \[-live\] \[-dead\] \[-decommissioning\] \[-enteringmaintenance\] \[-inmaintenance\]:(.)*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>



[34/50] [abbrv] hadoop git commit: YARN-6316 Provide help information and documentation for TimelineSchemaCreator (Contributed by Haibo Chen via Vrushali C)

Posted by xg...@apache.org.
YARN-6316 Provide help information and documentation for TimelineSchemaCreator (Contributed by Haibo Chen via Vrushali C)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d95c82cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d95c82cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d95c82cb

Branch: refs/heads/YARN-5734
Commit: d95c82cb79162da0e6bb6b503d766866aa7987e6
Parents: 7101477
Author: vrushali <vr...@apache.org>
Authored: Thu Jun 1 18:30:23 2017 -0700
Committer: vrushali <vr...@apache.org>
Committed: Thu Jun 1 18:30:23 2017 -0700

----------------------------------------------------------------------
 .../storage/TimelineSchemaCreator.java          | 144 +++++++++++++------
 .../src/site/markdown/TimelineServiceV2.md      |   5 +-
 2 files changed, 101 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d95c82cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index 9369d6a..b436eec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -64,6 +64,8 @@ public final class TimelineSchemaCreator {
   private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
   private static final String TTL_OPTION_SHORT = "m";
   private static final String ENTITY_TABLE_NAME_SHORT = "e";
+  private static final String HELP_SHORT = "h";
+  private static final String CREATE_TABLES_SHORT = "c";
 
   public static void main(String[] args) throws Exception {
 
@@ -75,54 +77,44 @@ public final class TimelineSchemaCreator {
     // Grab the arguments we're looking for.
     CommandLine commandLine = parseArgs(otherArgs);
 
-    // Grab the entityTableName argument
-    String entityTableName
-        = commandLine.getOptionValue(ENTITY_TABLE_NAME_SHORT);
-    if (StringUtils.isNotBlank(entityTableName)) {
-      hbaseConf.set(EntityTable.TABLE_NAME_CONF_NAME, entityTableName);
-    }
-    String entityTableTTLMetrics = commandLine.getOptionValue(TTL_OPTION_SHORT);
-    if (StringUtils.isNotBlank(entityTableTTLMetrics)) {
-      int metricsTTL = Integer.parseInt(entityTableTTLMetrics);
-      new EntityTable().setMetricsTTL(metricsTTL, hbaseConf);
-    }
-    // Grab the appToflowTableName argument
-    String appToflowTableName = commandLine.getOptionValue(
-        APP_TO_FLOW_TABLE_NAME_SHORT);
-    if (StringUtils.isNotBlank(appToflowTableName)) {
-      hbaseConf.set(AppToFlowTable.TABLE_NAME_CONF_NAME, appToflowTableName);
-    }
-    // Grab the applicationTableName argument
-    String applicationTableName = commandLine.getOptionValue(
-        APP_TABLE_NAME_SHORT);
-    if (StringUtils.isNotBlank(applicationTableName)) {
-      hbaseConf.set(ApplicationTable.TABLE_NAME_CONF_NAME,
-          applicationTableName);
-    }
-
-    List<Exception> exceptions = new ArrayList<>();
-    try {
-      boolean skipExisting
-          = commandLine.hasOption(SKIP_EXISTING_TABLE_OPTION_SHORT);
-      if (skipExisting) {
-        LOG.info("Will skip existing tables and continue on htable creation "
-            + "exceptions!");
+    if (commandLine.hasOption(HELP_SHORT)) {
+      // -help option has the highest precedence
+      printUsage();
+    } else if (commandLine.hasOption(CREATE_TABLES_SHORT)) {
+      // Grab the entityTableName argument
+      String entityTableName = commandLine.getOptionValue(
+          ENTITY_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(entityTableName)) {
+        hbaseConf.set(EntityTable.TABLE_NAME_CONF_NAME, entityTableName);
       }
-      createAllTables(hbaseConf, skipExisting);
-      LOG.info("Successfully created HBase schema. ");
-    } catch (IOException e) {
-      LOG.error("Error in creating hbase tables: " + e.getMessage());
-      exceptions.add(e);
-    }
-
-    if (exceptions.size() > 0) {
-      LOG.warn("Schema creation finished with the following exceptions");
-      for (Exception e : exceptions) {
-        LOG.warn(e.getMessage());
+      // Grab the TTL argument
+      String entityTableTTLMetrics = commandLine.getOptionValue(
+          TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(entityTableTTLMetrics)) {
+        int metricsTTL = Integer.parseInt(entityTableTTLMetrics);
+        new EntityTable().setMetricsTTL(metricsTTL, hbaseConf);
       }
-      System.exit(-1);
+      // Grab the appToflowTableName argument
+      String appToflowTableName = commandLine.getOptionValue(
+          APP_TO_FLOW_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(appToflowTableName)) {
+        hbaseConf.set(AppToFlowTable.TABLE_NAME_CONF_NAME, appToflowTableName);
+      }
+      // Grab the applicationTableName argument
+      String applicationTableName = commandLine.getOptionValue(
+          APP_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(applicationTableName)) {
+        hbaseConf.set(ApplicationTable.TABLE_NAME_CONF_NAME,
+            applicationTableName);
+      }
+
+      // create all table schemas in hbase
+      final boolean skipExisting = commandLine.hasOption(
+          SKIP_EXISTING_TABLE_OPTION_SHORT);
+      createAllSchemas(hbaseConf, skipExisting);
     } else {
-      LOG.info("Schema creation finished successfully");
+      // print usage information if -create is not specified
+      printUsage();
     }
   }
 
@@ -138,7 +130,16 @@ public final class TimelineSchemaCreator {
     Options options = new Options();
 
     // Input
-    Option o = new Option(ENTITY_TABLE_NAME_SHORT, "entityTableName", true,
+    Option o = new Option(HELP_SHORT, "help", false, "print help information");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(CREATE_TABLES_SHORT, "create", false,
+        "a mandatory option to create hbase tables");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(ENTITY_TABLE_NAME_SHORT, "entityTableName", true,
         "entity table name");
     o.setArgName("entityTableName");
     o.setRequired(false);
@@ -183,6 +184,57 @@ public final class TimelineSchemaCreator {
     return commandLine;
   }
 
+  private static void printUsage() {
+    StringBuilder usage = new StringBuilder("Command Usage: \n");
+    usage.append("TimelineSchemaCreator [-help] Display help info" +
+        " for all commands. Or\n");
+    usage.append("TimelineSchemaCreator -create [OPTIONAL_OPTIONS]" +
+        " Create hbase tables.\n\n");
+    usage.append("The Optional options for creating tables include: \n");
+    usage.append("[-entityTableName <Entity Table Name>] " +
+        "The name of the Entity table\n");
+    usage.append("[-metricsTTL <Entity Table Metrics TTL>]" +
+        " TTL for metrics in the Entity table\n");
+    usage.append("[-appToflowTableName <AppToflow Table Name>]" +
+        " The name of the AppToFlow table\n");
+    usage.append("[-applicationTableName <Application Table Name>]" +
+        " The name of the Application table\n");
+    usage.append("[-skipExistingTable] Whether to skip existing" +
+        " hbase tables\n");
+    System.out.println(usage.toString());
+  }
+
+  /**
+   * Create all table schemas, logging success or any exceptions on failure.
+   * @param hbaseConf the hbase configuration to create tables with
+   * @param skipExisting whether to skip existing hbase tables
+   */
+  private static void createAllSchemas(Configuration hbaseConf,
+      boolean skipExisting) {
+    List<Exception> exceptions = new ArrayList<>();
+    try {
+      if (skipExisting) {
+        LOG.info("Will skip existing tables and continue on htable creation "
+            + "exceptions!");
+      }
+      createAllTables(hbaseConf, skipExisting);
+      LOG.info("Successfully created HBase schema. ");
+    } catch (IOException e) {
+      LOG.error("Error in creating hbase tables: " + e.getMessage());
+      exceptions.add(e);
+    }
+
+    if (exceptions.size() > 0) {
+      LOG.warn("Schema creation finished with the following exceptions");
+      for (Exception e : exceptions) {
+        LOG.warn(e.getMessage());
+      }
+      System.exit(-1);
+    } else {
+      LOG.info("Schema creation finished successfully");
+    }
+  }
+
   @VisibleForTesting
   public static void createAllTables(Configuration hbaseConf,
       boolean skipExisting) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d95c82cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index bcbe0b7..04822c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -183,11 +183,12 @@ to a dynamically (table coprocessor).
 
 Finally, run the schema creator tool to create the necessary tables:
 
-    bin/hbase org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator
+    bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator -create
 
 The `TimelineSchemaCreator` tool supports a few options that may come handy especially when you
 are testing. For example, you can use `-skipExistingTable` (`-s` for short) to skip existing tables
-and continue to create other tables rather than failing the schema creation.
+and continue to create other tables rather than failing the schema creation. When no option or '-help'
+('-h' for short) is provided, the command usage is printed.
 
 #### Enabling Timeline Service v.2
 Following are the basic configurations to start Timeline service v.2:
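
As a hedged illustration (option names are taken from the printUsage text above; the TTL value and the choice to skip existing tables are placeholders, not defaults), the creator can also be invoked programmatically:

    import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;

    public class SchemaCreatorExample {
      public static void main(String[] args) throws Exception {
        // Equivalent to:
        //   bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage
        //       .TimelineSchemaCreator -create -metricsTTL 259200 -skipExistingTable
        // -create is now mandatory for table creation; the TTL value is
        // illustrative (HBase interprets TTLs in seconds).
        TimelineSchemaCreator.main(new String[] {
            "-create", "-metricsTTL", "259200", "-skipExistingTable"});
      }
    }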




[15/50] [abbrv] hadoop git commit: HADOOP-14456. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

Posted by xg...@apache.org.
HADOOP-14456. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62857be2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62857be2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62857be2

Branch: refs/heads/YARN-5734
Commit: 62857be2110aaded84a93fc9891742a1271b2b85
Parents: af03c33
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed May 31 01:07:58 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed May 31 01:07:58 2017 +0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/crypto/OpensslCipher.java  | 4 ++--
 .../main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java | 2 +-
 .../src/main/java/org/apache/hadoop/fs/Options.java            | 2 +-
 .../org/apache/hadoop/fs/shell/CommandWithDestination.java     | 2 +-
 .../main/java/org/apache/hadoop/ha/ActiveStandbyElector.java   | 4 ++--
 .../src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java  | 2 +-
 .../src/main/java/org/apache/hadoop/io/SequenceFile.java       | 2 +-
 .../hadoop/io/compress/zlib/BuiltInGzipDecompressor.java       | 2 +-
 .../org/apache/hadoop/io/compress/zlib/ZlibCompressor.java     | 6 +++---
 .../org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java   | 2 +-
 .../main/java/org/apache/hadoop/io/file/tfile/Compression.java | 2 +-
 .../src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java  | 2 +-
 .../src/main/java/org/apache/hadoop/ipc/Server.java            | 2 +-
 .../main/java/org/apache/hadoop/security/SaslRpcServer.java    | 4 ++--
 .../java/org/apache/hadoop/security/UserGroupInformation.java  | 2 +-
 .../main/java/org/apache/hadoop/security/ssl/SSLFactory.java   | 2 +-
 .../token/delegation/web/DelegationTokenAuthenticator.java     | 2 +-
 .../src/main/java/org/apache/hadoop/util/StringUtils.java      | 2 +-
 .../test/java/org/apache/hadoop/fs/FileContextTestHelper.java  | 2 +-
 .../test/java/org/apache/hadoop/fs/FileSystemTestHelper.java   | 2 +-
 .../src/test/java/org/apache/hadoop/io/TestIOUtils.java        | 2 +-
 .../org/apache/hadoop/io/retry/UnreliableImplementation.java   | 2 +-
 .../main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java | 2 +-
 .../src/main/java/org/apache/hadoop/mount/MountInterface.java  | 2 +-
 .../src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java | 2 +-
 .../main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java | 2 +-
 .../src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java     | 2 +-
 27 files changed, 32 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
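
For context, a minimal sketch (not from the patch) of why the modifier is redundant: the Java Language Specification makes every nested enum implicitly static, so the two declarations below have identical semantics:

    public class Outer {
      // Explicit modifier, as the code read before this change.
      public static enum Before { A, B }

      // Implicitly static; identical semantics, as after this change.
      public enum After { A, B }
    }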


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
index 2eb16ee..6a03bb6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -47,7 +47,7 @@ public final class OpensslCipher {
   public static final int DECRYPT_MODE = 0;
   
   /** Currently only support AES/CTR/NoPadding. */
-  private static enum AlgMode {
+  private enum AlgMode {
     AES_CTR;
     
     static int get(String algorithm, String mode) 
@@ -61,7 +61,7 @@ public final class OpensslCipher {
     }
   }
   
-  private static enum Padding {
+  private enum Padding {
     NoPadding;
     
     static int get(String padding) throws NoSuchPaddingException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index 74052eb..8411ffb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -198,7 +198,7 @@ public class ValueQueue <E> {
    * "n" values and Queue is empty.
    * This decides how many values to return when client calls "getAtMost"
    */
-  public static enum SyncGenerationPolicy {
+  public enum SyncGenerationPolicy {
     ATLEAST_ONE, // Return atleast 1 value
     LOW_WATERMARK, // Return min(n, lowWatermark * numValues) values
     ALL // Return n values

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index dc50286..0ac53b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -211,7 +211,7 @@ public final class Options {
   /**
    * Enum to support the varargs for rename() options
    */
-  public static enum Rename {
+  public enum Rename {
     NONE((byte) 0), // No options
     OVERWRITE((byte) 1), // Overwrite the rename destination
     TO_TRASH ((byte) 2); // Rename to trash

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 2ab0a26..578d22b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -118,7 +118,7 @@ abstract class CommandWithDestination extends FsCommand {
     }
   }
   
-  protected static enum FileAttribute {
+  protected enum FileAttribute {
     TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;
 
     public static FileAttribute getAttribute(char symbol) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index db853f4..dec401e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -145,11 +145,11 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
 
-  private static enum ConnectionState {
+  private enum ConnectionState {
     DISCONNECTED, CONNECTED, TERMINATED
   };
 
-  static enum State {
+  enum State {
     INIT, ACTIVE, STANDBY, NEUTRAL
   };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index 7de12c7..7099de8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -65,7 +65,7 @@ public interface HAServiceProtocol {
     }
   }
   
-  public static enum RequestSource {
+  public enum RequestSource {
     REQUEST_BY_USER,
     REQUEST_BY_USER_FORCED,
     REQUEST_BY_ZKFC;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index de0bf4f..475d272 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -230,7 +230,7 @@ public class SequenceFile {
    * 
    * @see SequenceFile.Writer
    */
-  public static enum CompressionType {
+  public enum CompressionType {
     /** Do not compress records. */
     NONE, 
     /** Compress values only, each separately. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
index 2dee03a..b4c6659 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
@@ -68,7 +68,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
    * (Technically, the private variables localBuf through hasHeaderCRC are
    * also part of the state, so this enum is merely the label for it.)
    */
-  private static enum GzipStateLabel {
+  private enum GzipStateLabel {
     /**
      * Immediately prior to or (strictly) within the 10-byte basic gzip header.
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
index 24d98a5..6396fcb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
@@ -57,7 +57,7 @@ public class ZlibCompressor implements Compressor {
   /**
    * The compression level for zlib library.
    */
-  public static enum CompressionLevel {
+  public enum CompressionLevel {
     /**
      * Compression level for no compression.
      */
@@ -133,7 +133,7 @@ public class ZlibCompressor implements Compressor {
   /**
    * The compression level for zlib library.
    */
-  public static enum CompressionStrategy {
+  public enum CompressionStrategy {
     /**
      * Compression strategy best used for data consisting mostly of small
      * values with a somewhat random distribution. Forces more Huffman coding
@@ -178,7 +178,7 @@ public class ZlibCompressor implements Compressor {
   /**
    * The type of header for compressed data.
    */
-  public static enum CompressionHeader {
+  public enum CompressionHeader {
     /**
      * No headers/trailers/checksums.
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
index d728fad..dd550b9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
@@ -49,7 +49,7 @@ public class ZlibDecompressor implements Decompressor {
   /**
    * The headers to detect from compressed data.
    */
-  public static enum CompressionHeader {
+  public enum CompressionHeader {
     /**
      * No headers/trailers/checksums.
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index f7ec7ac..596b7ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -75,7 +75,7 @@ final class Compression {
   /**
    * Compression algorithms.
    */
-  static enum Algorithm {
+  enum Algorithm {
     LZO(TFile.COMPRESSION_LZO) {
       private transient boolean checked = false;
       private static final String defaultClazz =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index f3ff1c7..b51c905 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -570,7 +570,7 @@ public class NativeIO {
     private static native String getOwner(FileDescriptor fd) throws IOException;
 
     /** Supported list of Windows access right flags */
-    public static enum AccessRight {
+    public enum AccessRight {
       ACCESS_READ (0x0001),      // FILE_READ_DATA
       ACCESS_WRITE (0x0002),     // FILE_WRITE_DATA
       ACCESS_EXECUTE (0x0020);   // FILE_EXECUTE

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 3ca1524..3ea5a24 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1552,7 +1552,7 @@ public abstract class Server {
   }
 
   @InterfaceAudience.Private
-  public static enum AuthProtocol {
+  public enum AuthProtocol {
     NONE(0),
     SASL(-33);
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
index 377a0f1..a6fbb6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
@@ -68,7 +68,7 @@ public class SaslRpcServer {
   public static final String SASL_DEFAULT_REALM = "default";
   private static SaslServerFactory saslFactory;
 
-  public static enum QualityOfProtection {
+  public enum QualityOfProtection {
     AUTHENTICATION("auth"),
     INTEGRITY("auth-int"),
     PRIVACY("auth-conf");
@@ -218,7 +218,7 @@ public class SaslRpcServer {
 
   /** Authentication method */
   @InterfaceStability.Evolving
-  public static enum AuthMethod {
+  public enum AuthMethod {
     SIMPLE((byte) 80, ""),
     KERBEROS((byte) 81, "GSSAPI"),
     @Deprecated

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index a5c6226..0510abd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1510,7 +1510,7 @@ public class UserGroupInformation {
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public static enum AuthenticationMethod {
+  public enum AuthenticationMethod {
     // currently we support only one auth per method, but eventually a 
     // subtype is needed to differentiate, ex. if digest is token or ldap
     SIMPLE(AuthMethod.SIMPLE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
index 1c50488..f05274a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
@@ -57,7 +57,7 @@ public class SSLFactory implements ConnectionConfigurator {
   static final Logger LOG = LoggerFactory.getLogger(SSLFactory.class);
 
   @InterfaceAudience.Private
-  public static enum Mode { CLIENT, SERVER }
+  public enum Mode { CLIENT, SERVER }
 
   public static final String SSL_CLIENT_CONF_KEY = "hadoop.ssl.client.conf";
   public static final String SSL_CLIENT_CONF_DEFAULT = "ssl-client.xml";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 9f39569..87a5697 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -74,7 +74,7 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
    * DelegationToken operations.
    */
   @InterfaceAudience.Private
-  public static enum DelegationTokenOperation {
+  public enum DelegationTokenOperation {
     GETDELEGATIONTOKEN(HTTP_GET, true),
     RENEWDELEGATIONTOKEN(HTTP_PUT, true),
     CANCELDELEGATIONTOKEN(HTTP_PUT, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 72bd171..cda5ec7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -759,7 +759,7 @@ public class StringUtils {
    * which can be represented by a 64-bit integer.
    * TraditionalBinaryPrefix symbol are case insensitive. 
    */
-  public static enum TraditionalBinaryPrefix {
+  public enum TraditionalBinaryPrefix {
     KILO(10),
     MEGA(KILO.bitShift + 10),
     GIGA(MEGA.bitShift + 10),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
index 5bf842c..e3a4a12 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
@@ -216,7 +216,7 @@ public final class FileContextTestHelper {
     return containsPath(fc, new Path(path), dirList);
   }
   
-  public static enum fileType {isDir, isFile, isSymlink};
+  public enum fileType {isDir, isFile, isSymlink};
   
   public static void checkFileStatus(FileContext aFc, String path,
       fileType expectedType) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index c9d1781..ef9e094 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -237,7 +237,7 @@ public class FileSystemTestHelper {
     return containsPath(fSys, new Path(path), dirList);
   }
   
-  public static enum fileType {isDir, isFile, isSymlink};
+  public enum fileType {isDir, isFile, isSymlink};
   public static void checkFileStatus(FileSystem aFs, String path,
       fileType expectedType) throws IOException {
     FileStatus s = aFs.getFileStatus(new Path(path));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index ac4fb48..cac73bd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -253,7 +253,7 @@ public class TestIOUtils {
     }
   }
 
-  private static enum NoEntry3Filter implements FilenameFilter {
+  private enum NoEntry3Filter implements FilenameFilter {
     INSTANCE;
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index 9387772..7ce8abc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -36,7 +36,7 @@ class UnreliableImplementation implements UnreliableInterface {
   private String identifier;
   private TypeOfExceptionToFailWith exceptionToFailWith;
   
-  public static enum TypeOfExceptionToFailWith {
+  public enum TypeOfExceptionToFailWith {
     UNRELIABLE_EXCEPTION,
     STANDBY_EXCEPTION,
     IO_EXCEPTION,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index 4e84e62..5873c7a 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -60,7 +60,7 @@ import java.util.Map;
 @InterfaceAudience.Private
 public class KMS {
 
-  public static enum KMSOp {
+  public enum KMSOp {
     CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION, INVALIDATE_CACHE,
     GET_KEYS, GET_KEYS_METADATA,
     GET_KEY_VERSIONS, GET_METADATA, GET_KEY_VERSION, GET_CURRENT_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountInterface.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountInterface.java
index 9d2693d..4e6d0d1 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountInterface.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountInterface.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.oncrpc.XDR;
  */
 public interface MountInterface {
   /** Mount procedures */
-  public static enum MNTPROC {
+  public enum MNTPROC {
     // the order of the values below are significant.
     NULL,
     MNT,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
index de587a6..ed8e442 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
@@ -32,7 +32,7 @@ public class Nfs3Constant {
   public final static int VERSION = 3;
   
   // The procedures
-  public static enum NFSPROC3 {
+  public enum NFSPROC3 {
     // the order of the values below are significant.
     NULL,
     GETATTR,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
index e8e637c..3db4485 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
@@ -41,7 +41,7 @@ public class SetAttr3 {
   private NfsTime mtime;
   private EnumSet<SetAttrField> updateFields;
 
-  public static enum SetAttrField {
+  public enum SetAttrField {
     MODE, UID, GID, SIZE, ATIME, MTIME
   };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java
index fff015e..85cec76 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java
@@ -22,7 +22,7 @@ package org.apache.hadoop.oncrpc;
  */
 public abstract class RpcMessage {
   /** Message type */
-  public static enum Type {
+  public enum Type {
     // the order of the values below are significant.
     RPC_CALL,
     RPC_REPLY;

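All of these removals follow from the same language rule: a nested enum type is implicitly static (JLS 8.9), so the explicit modifier adds nothing. A minimal, self-contained sketch (class and enum names are invented for illustration):

    public class EnumModifierSketch {
      // The two declarations below are exactly equivalent; a member enum
      // is implicitly static whether or not the keyword is written.
      public static enum WithModifier { ALPHA, BETA }
      public enum WithoutModifier { ALPHA, BETA }

      public static void main(String[] args) {
        // Neither enum needs an enclosing instance to be used.
        System.out.println(WithModifier.ALPHA);   // ALPHA
        System.out.println(WithoutModifier.BETA); // BETA
      }
    }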



[28/50] [abbrv] hadoop git commit: HDFS-11893. Fix TestDFSShell.testMoveWithTargetPortEmpty failure. Contributed by Brahma Reddy Battula.

Posted by xg...@apache.org.
HDFS-11893. Fix TestDFSShell.testMoveWithTargetPortEmpty failure. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0dcf843c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0dcf843c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0dcf843c

Branch: refs/heads/YARN-5734
Commit: 0dcf843c008f2b9cece8c0a0cef78140398ac464
Parents: f5517a8
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Thu Jun 1 22:29:29 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Thu Jun 1 22:29:29 2017 +0800

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java         | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dcf843c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 3f6b268..c82c045 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -789,7 +789,7 @@ public class TestDFSShell {
       argv = new String[3];
       argv[0] = "-mv";
       argv[1] = srcFs.getUri() + "/testfile";
-      argv[2] = "hdfs://localhost/testfile2";
+      argv[2] = "hdfs://" + srcFs.getUri().getHost() + "/testfile2";
       int ret = ToolRunner.run(shell, argv);
       assertEquals("mv should have succeeded", 0, ret);
     } finally {

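The failure mode behind the one-line fix: the target URI deliberately leaves the port empty (that is what the test exercises), so source and target only resolve to the same filesystem when their hosts agree, and hardcoding "localhost" breaks whenever the MiniDFSCluster reports a different host. A standalone sketch of the port-agnostic construction (the source URI below is illustrative, not taken from the test):

    import java.net.URI;

    public class TargetUriSketch {
      public static void main(String[] args) {
        URI src = URI.create("hdfs://localhost:12345/testfile"); // illustrative
        // Reuse only the host and leave the port empty, as the fixed test does.
        String target = "hdfs://" + src.getHost() + "/testfile2";
        System.out.println(target); // hdfs://localhost/testfile2
      }
    }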



[42/50] [abbrv] hadoop git commit: HADOOP-14428. s3a: mkdir appears to be broken. Contributed by Mingliang Liu

Posted by xg...@apache.org.
HADOOP-14428. s3a: mkdir appears to be broken. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6aeda55b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6aeda55b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6aeda55b

Branch: refs/heads/YARN-5734
Commit: 6aeda55bb8f741d9dafd41f6dfbf1a88acdd4003
Parents: abdd609
Author: Mingliang Liu <li...@apache.org>
Authored: Wed May 24 14:44:27 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Jun 5 11:26:56 2017 -0700

----------------------------------------------------------------------
 .../fs/contract/AbstractContractMkdirTest.java  | 31 ++++++++++++--------
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  5 +++-
 2 files changed, 23 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6aeda55b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
index 71d2706..c5a546d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
@@ -113,18 +113,25 @@ public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBa
     describe("verify mkdir slash handling");
     FileSystem fs = getFileSystem();
 
-    // No trailing slash
-    assertTrue(fs.mkdirs(path("testmkdir/a")));
-    assertPathExists("mkdir without trailing slash failed",
-        path("testmkdir/a"));
-
-    // With trailing slash
-    assertTrue(fs.mkdirs(path("testmkdir/b/")));
-    assertPathExists("mkdir with trailing slash failed", path("testmkdir/b/"));
-
-    // Mismatched slashes
-    assertPathExists("check path existence without trailing slash failed",
-        path("testmkdir/b"));
+    final Path[] paths = new Path[] {
+        path("testMkdirSlashHandling/a"), // w/o trailing slash
+        path("testMkdirSlashHandling/b/"), // w/ trailing slash
+        // unqualified w/o trailing slash
+        new Path(getContract().getTestPath() + "/testMkdirSlashHandling/c"),
+        // unqualified w/ trailing slash
+        new Path(getContract().getTestPath() + "/testMkdirSlashHandling/d/"),
+        // unqualified w/ multiple trailing slashes
+        new Path(getContract().getTestPath() + "/testMkdirSlashHandling/e///")
+    };
+    for (Path path : paths) {
+      assertTrue(fs.mkdirs(path));
+      assertPathExists(path + " does not exist after mkdirs", path);
+      assertIsDirectory(path);
+      if (path.toString().endsWith("/")) {
+        String s = path.toString().substring(0, path.toString().length() - 1);
+        assertIsDirectory(new Path(s));
+      }
+    }
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6aeda55b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 25f2671..872dd5f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1587,7 +1587,9 @@ public class S3AFileSystem extends FileSystem {
 
       String key = pathToKey(f);
       createFakeDirectory(key);
-      deleteUnnecessaryFakeDirectories(f.getParent());
+      // this is complicated because getParent(a/b/c/) returns a/b/c, but
+      // we want a/b. See HADOOP-14428 for more details.
+      deleteUnnecessaryFakeDirectories(new Path(f.toString()).getParent());
       return true;
     }
   }
@@ -1971,6 +1973,7 @@ public class S3AFileSystem extends FileSystem {
     while (!path.isRoot()) {
       String key = pathToKey(path);
       key = (key.endsWith("/")) ? key : (key + "/");
+      LOG.trace("To delete unnecessary fake directory {} for {}", key, path);
       keysToRemove.add(new DeleteObjectsRequest.KeyVersion(key));
       path = path.getParent();
     }

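The HADOOP-14428 comment is worth a concrete illustration. In the Hadoop Path implementation of this era, the String constructor normalizes a trailing slash away, but a Path built from a URI keeps it, and getParent() then merely strips the empty last component. A sketch of both behaviors (the s3a bucket name is hypothetical):

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class ParentSlashSketch {
      public static void main(String[] args) {
        // Built from a URI, the trailing slash survives into the Path.
        Path withSlash = new Path(URI.create("s3a://bucket/a/b/c/"));
        System.out.println(withSlash.getParent()); // s3a://bucket/a/b/c
        // Round-tripping through the String constructor normalizes the
        // slash, so getParent() yields the directory the cleanup wants.
        System.out.println(new Path(withSlash.toString()).getParent()); // s3a://bucket/a/b
      }
    }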



[18/50] [abbrv] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

Posted by xg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index 5f9b883..c1df562 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -28,6 +27,7 @@ import java.io.File;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,19 +58,17 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
-import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.ArgumentMatcher;
 
 public class TestNodeManagerReboot {
 
@@ -195,19 +193,18 @@ public class TestNodeManagerReboot {
     // restart the NodeManager
     restartNM(MAX_TRIES);
     checkNumOfLocalDirs();
-    
-    verify(delService, times(1)).delete(
-      (String) isNull(),
-      argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR
-          + "_DEL_")));
-    verify(delService, times(1)).delete((String) isNull(),
-      argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
-    verify(delService, times(1)).scheduleFileDeletionTask(
-      argThat(new FileDeletionInclude(user, null,
-        new String[] { destinationFile })));
-    verify(delService, times(1)).scheduleFileDeletionTask(
-      argThat(new FileDeletionInclude(null, ContainerLocalizer.USERCACHE
-          + "_DEL_", new String[] {})));
+
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, null,
+        new Path(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"), null)));
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, null, new Path(ContainerLocalizer.FILECACHE + "_DEL_"),
+        null)));
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, user, null, Arrays.asList(new Path(destinationFile)))));
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, null, new Path(ContainerLocalizer.USERCACHE + "_DEL_"),
+        new ArrayList<Path>())));
     
     // restart the NodeManager again
     // this time usercache directory should be empty
@@ -329,72 +326,4 @@ public class TestNodeManagerReboot {
       return conf;
     }
   }
-
-  class PathInclude extends ArgumentMatcher<Path> {
-
-    final String part;
-
-    PathInclude(String part) {
-      this.part = part;
-    }
-
-    @Override
-    public boolean matches(Object o) {
-      return ((Path) o).getName().indexOf(part) != -1;
-    }
-  }
-  
-  class FileDeletionInclude extends ArgumentMatcher<FileDeletionTask> {
-    final String user;
-    final String subDirIncludes;
-    final String[] baseDirIncludes;
-    
-    public FileDeletionInclude(String user, String subDirIncludes,
-        String [] baseDirIncludes) {
-      this.user = user;
-      this.subDirIncludes = subDirIncludes;
-      this.baseDirIncludes = baseDirIncludes;
-    }
-    
-    @Override
-    public boolean matches(Object o) {
-      FileDeletionTask fd = (FileDeletionTask)o;
-      if (fd.getUser() == null && user != null) {
-        return false;
-      } else if (fd.getUser() != null && user == null) {
-        return false;
-      } else if (fd.getUser() != null && user != null) {
-        return fd.getUser().equals(user);
-      }
-      if (!comparePaths(fd.getSubDir(), subDirIncludes)) {
-        return false;
-      }
-      if (baseDirIncludes == null && fd.getBaseDirs() != null) {
-        return false;
-      } else if (baseDirIncludes != null && fd.getBaseDirs() == null ) {
-        return false;
-      } else if (baseDirIncludes != null && fd.getBaseDirs() != null) {
-        if (baseDirIncludes.length != fd.getBaseDirs().size()) {
-          return false;
-        }
-        for (int i =0 ; i < baseDirIncludes.length; i++) {
-          if (!comparePaths(fd.getBaseDirs().get(i), baseDirIncludes[i])) {
-            return false;
-          }
-        }
-      }
-      return true;
-    }
-    
-    public boolean comparePaths(Path p1, String p2) {
-      if (p1 == null && p2 != null){
-        return false;
-      } else if (p1 != null && p2 == null) {
-        return false;
-      } else if (p1 != null && p2 != null ){
-        return p1.toUri().getPath().contains(p2.toString());
-      }
-      return true;
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java
new file mode 100644
index 0000000..69e01bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTaskType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Test conversion to {@link DeletionTask}.
+ */
+public class TestNMProtoUtils {
+
+  @Test
+  public void testConvertProtoToDeletionTask() throws Exception {
+    DeletionService deletionService = mock(DeletionService.class);
+    DeletionServiceDeleteTaskProto.Builder protoBuilder =
+        DeletionServiceDeleteTaskProto.newBuilder();
+    int id = 0;
+    protoBuilder.setId(id);
+    DeletionServiceDeleteTaskProto proto = protoBuilder.build();
+    DeletionTask deletionTask =
+        NMProtoUtils.convertProtoToDeletionTask(proto, deletionService);
+    assertEquals(DeletionTaskType.FILE, deletionTask.getDeletionTaskType());
+    assertEquals(id, deletionTask.getTaskId());
+  }
+
+  @Test
+  public void testConvertProtoToFileDeletionTask() throws Exception {
+    DeletionService deletionService = mock(DeletionService.class);
+    int id = 0;
+    String user = "user";
+    Path subdir = new Path("subdir");
+    Path basedir = new Path("basedir");
+    DeletionServiceDeleteTaskProto.Builder protoBuilder =
+        DeletionServiceDeleteTaskProto.newBuilder();
+    protoBuilder
+        .setId(id)
+        .setUser("user")
+        .setSubdir(subdir.getName())
+        .addBasedirs(basedir.getName());
+    DeletionServiceDeleteTaskProto proto = protoBuilder.build();
+    DeletionTask deletionTask =
+        NMProtoUtils.convertProtoToFileDeletionTask(proto, deletionService, id);
+    assertEquals(DeletionTaskType.FILE.name(),
+        deletionTask.getDeletionTaskType().name());
+    assertEquals(id, deletionTask.getTaskId());
+    assertEquals(subdir, ((FileDeletionTask) deletionTask).getSubDir());
+    assertEquals(basedir,
+        ((FileDeletionTask) deletionTask).getBaseDirs().get(0));
+  }
+
+  @Test
+  public void testConvertProtoToDeletionTaskRecoveryInfo() throws Exception {
+    long delTime = System.currentTimeMillis();
+    List<Integer> successorTaskIds = Arrays.asList(1);
+    DeletionTask deletionTask = mock(DeletionTask.class);
+    DeletionTaskRecoveryInfo info =
+        new DeletionTaskRecoveryInfo(deletionTask, successorTaskIds, delTime);
+    assertEquals(deletionTask, info.getTask());
+    assertEquals(successorTaskIds, info.getSuccessorTaskIds());
+    assertEquals(delTime, info.getDeletionTimestamp());
+  }
+
+}
\ No newline at end of file

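Together with TestFileDeletionTask further down, the two conversion directions give the NM state store a full round trip. A sketch in the same style, reusing the imports above (the task id and paths are arbitrary):

      @Test
      public void testDeletionTaskProtoRoundTrip() throws Exception {
        DeletionService delService = mock(DeletionService.class);
        FileDeletionTask task = new FileDeletionTask(7, delService, "user",
            new Path("subdir"), Arrays.asList(new Path("basedir")));
        // Serialize on the way into the NM state store...
        DeletionServiceDeleteTaskProto proto = task.convertDeletionTaskToProto();
        // ...and rebuild the task after a work-preserving restart.
        DeletionTask restored =
            NMProtoUtils.convertProtoToDeletionTask(proto, delService);
        assertEquals(7, restored.getTaskId());
        assertEquals(DeletionTaskType.FILE, restored.getDeletionTaskType());
      }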
http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index 2991c0c..7980a80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
@@ -260,10 +261,10 @@ public abstract class BaseContainerManagerTest {
   protected DeletionService createDeletionService() {
     return new DeletionService(exec) {
       @Override
-      public void delete(String user, Path subDir, Path... baseDirs) {
+      public void delete(DeletionTask deletionTask) {
         // Don't do any deletions.
-        LOG.info("Psuedo delete: user - " + user + ", subDir - " + subDir
-            + ", baseDirs - " + Arrays.asList(baseDirs));
+        LOG.info("Psuedo delete: user - " + user
+            + ", type - " + deletionTask.getDeletionTaskType());
       };
     };
   }

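The override shows the shape of the new API: everything the old varargs signature carried now travels inside a typed DeletionTask, which can also persist itself for recovery. A fragment contrasting the two call styles (taskId and the Path variables are illustrative):

    // Before YARN-6366: loose arguments.
    delService.delete(user, subDir, baseDir1, baseDir2);
    // After: a self-describing task object.
    delService.delete(new FileDeletionTask(taskId, delService, user, subDir,
        Arrays.asList(baseDir1, baseDir2)));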
http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java
new file mode 100644
index 0000000..faad456
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.mockito.ArgumentMatcher;
+
+import java.util.List;
+
+/**
+ * ArgumentMatcher to check the arguments of the {@link FileDeletionTask}.
+ */
+public class FileDeletionMatcher extends ArgumentMatcher<FileDeletionTask> {
+
+  private final DeletionService delService;
+  private final String user;
+  private final Path subDirIncludes;
+  private final List<Path> baseDirIncludes;
+
+  public FileDeletionMatcher(DeletionService delService, String user,
+      Path subDirIncludes, List<Path> baseDirIncludes) {
+    this.delService = delService;
+    this.user = user;
+    this.subDirIncludes = subDirIncludes;
+    this.baseDirIncludes = baseDirIncludes;
+  }
+
+  @Override
+  public boolean matches(Object o) {
+    FileDeletionTask fd = (FileDeletionTask) o;
+    if (fd.getUser() == null && user != null) {
+      return false;
+    } else if (fd.getUser() != null && user == null) {
+      return false;
+    } else if (fd.getUser() != null && user != null) {
+      return fd.getUser().equals(user);
+    }
+    if (!comparePaths(fd.getSubDir(), subDirIncludes.getName())) {
+      return false;
+    }
+    if (baseDirIncludes == null && fd.getBaseDirs() != null) {
+      return false;
+    } else if (baseDirIncludes != null && fd.getBaseDirs() == null) {
+      return false;
+    } else if (baseDirIncludes != null && fd.getBaseDirs() != null) {
+      if (baseDirIncludes.size() != fd.getBaseDirs().size()) {
+        return false;
+      }
+      for (int i = 0; i < baseDirIncludes.size(); i++) {
+        if (!comparePaths(fd.getBaseDirs().get(i),
+            baseDirIncludes.get(i).getName())) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  public boolean comparePaths(Path p1, String p2) {
+    if (p1 == null && p2 != null) {
+      return false;
+    } else if (p1 != null && p2 == null) {
+      return false;
+    } else if (p1 != null && p2 != null) {
+      return p1.toUri().getPath().contains(p2.toString());
+    }
+    return true;
+  }
+}

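A usage fragment (values are illustrative; Mockito 1.x style, as in the tests in this commit):

    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
        delService, "nobody", new Path("usercache_DEL_"), null)));

One subtlety visible in matches() above: when both the expected and the actual user are non-null, the method returns on the user comparison alone, so the subdirectory and base-directory fields are only consulted for user-less tasks.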
http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java
new file mode 100644
index 0000000..fd2e4fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Test the attributes of the {@link FileDeletionTask} class.
+ */
+public class TestFileDeletionTask {
+
+  private static final int ID = 0;
+  private static final String USER = "user";
+  private static final Path SUBDIR = new Path("subdir");
+  private static final Path BASEDIR = new Path("basedir");
+
+  private List<Path> baseDirs = new ArrayList<>();
+  private DeletionService deletionService;
+  private FileDeletionTask deletionTask;
+
+  @Before
+  public void setUp() throws Exception {
+    deletionService = mock(DeletionService.class);
+    baseDirs.add(BASEDIR);
+    deletionTask = new FileDeletionTask(ID, deletionService, USER, SUBDIR,
+        baseDirs);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    baseDirs.clear();
+  }
+
+  @Test
+  public void testGetUser() throws Exception {
+    assertEquals(USER, deletionTask.getUser());
+  }
+
+  @Test
+  public void testGetSubDir() throws Exception {
+    assertEquals(SUBDIR, deletionTask.getSubDir());
+  }
+
+  @Test
+  public void testGetBaseDirs() throws Exception {
+    assertEquals(1, deletionTask.getBaseDirs().size());
+    assertEquals(baseDirs, deletionTask.getBaseDirs());
+  }
+
+  @Test
+  public void testConvertDeletionTaskToProto() throws Exception {
+    DeletionServiceDeleteTaskProto proto =
+        deletionTask.convertDeletionTaskToProto();
+    assertEquals(ID, proto.getId());
+    assertEquals(USER, proto.getUser());
+    assertEquals(SUBDIR, new Path(proto.getSubdir()));
+    assertEquals(BASEDIR, new Path(proto.getBasedirs(0)));
+    assertEquals(1, proto.getBasedirsCount());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
index 2874acb..6cab593 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
 
 import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.argThat;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.mock;
@@ -54,6 +55,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceLocalizedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent;
@@ -823,7 +825,8 @@ public class TestLocalResourcesTrackerImpl {
       Path rPath = tracker.getPathForLocalization(req1, base_path,
           delService);
       Assert.assertFalse(lfs.util().exists(rPath));
-      verify(delService, times(1)).delete(eq(user), eq(conflictPath));
+      verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+          delService, user, conflictPath, null)));
     } finally {
       lfs.delete(base_path, true);
       if (dispatcher != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 89cbeb4..d863c6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -31,7 +31,6 @@ import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -68,6 +67,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import org.junit.Assert;
 import org.apache.commons.io.FileUtils;
@@ -1066,7 +1066,8 @@ public class TestResourceLocalizationService {
       verify(containerBus, times(3)).handle(argThat(matchesContainerLoc));
         
       // Verify deletion of localization token.
-      verify(delService).delete((String)isNull(), eq(localizationTokenPath));
+      verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+          delService, null, localizationTokenPath, null)));
     } finally {
       spyService.stop();
       dispatcher.stop();
@@ -1340,8 +1341,8 @@ public class TestResourceLocalizationService {
       Thread.sleep(50);
     }
     // Verify if downloading resources were submitted for deletion.
-    verify(delService).delete(eq(user), (Path) eq(null),
-        argThat(new DownloadingPathsMatcher(paths)));
+    verify(delService, times(2)).delete(argThat(new FileDeletionMatcher(
+        delService, user, null, new ArrayList<>(paths))));
 
     LocalResourcesTracker tracker = spyService.getLocalResourcesTracker(
         LocalResourceVisibility.PRIVATE, "user0", appId);
@@ -2753,15 +2754,19 @@ public class TestResourceLocalizationService {
       for (int i = 0; i < containerLocalDirs.size(); ++i) {
         if (i == 2) {
           try {
-            verify(delService).delete(user, containerLocalDirs.get(i));
-            verify(delService).delete(null, nmLocalContainerDirs.get(i));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, user, containerLocalDirs.get(i), null)));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, null, nmLocalContainerDirs.get(i), null)));
             Assert.fail("deletion attempts for invalid dirs");
           } catch (Throwable e) {
             continue;
           }
         } else {
-          verify(delService).delete(user, containerLocalDirs.get(i));
-          verify(delService).delete(null, nmLocalContainerDirs.get(i));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, user, containerLocalDirs.get(i), null)));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, null, nmLocalContainerDirs.get(i), null)));
         }
       }
 
@@ -2802,15 +2807,19 @@ public class TestResourceLocalizationService {
       for (int i = 0; i < containerLocalDirs.size(); ++i) {
         if (i == 3) {
           try {
-            verify(delService).delete(user, containerLocalDirs.get(i));
-            verify(delService).delete(null, nmLocalContainerDirs.get(i));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, user, containerLocalDirs.get(i), null)));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, null, nmLocalContainerDirs.get(i), null)));
             Assert.fail("deletion attempts for invalid dirs");
           } catch (Throwable e) {
             continue;
           }
         } else {
-          verify(delService).delete(user, appLocalDirs.get(i));
-          verify(delService).delete(null, nmLocalAppDirs.get(i));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, user, containerLocalDirs.get(i), null)));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, null, nmLocalContainerDirs.get(i), null)));
         }
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index 097146b..b4bd9d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -42,17 +42,16 @@ import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
-import org.mockito.Matchers;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -311,16 +310,18 @@ public class TestAppLogAggregatorImpl {
         @Override
         public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
           Set<String> paths = new HashSet<>();
-          Object[] args = invocationOnMock.getArguments();
-          for(int i = 2; i < args.length; i++) {
-            Path path = (Path) args[i];
-            paths.add(path.toUri().getRawPath());
+          Object[] tasks = invocationOnMock.getArguments();
+          for(int i = 0; i < tasks.length; i++) {
+            FileDeletionTask task = (FileDeletionTask) tasks[i];
+            for (Path path: task.getBaseDirs()) {
+              paths.add(path.toUri().getRawPath());
+            }
           }
           verifyFilesToDelete(expectedPathsForDeletion, paths);
           return null;
         }
       }).doNothing().when(deletionServiceWithExpectedFiles).delete(
-          any(String.class), any(Path.class), Matchers.<Path>anyVararg());
+          any(FileDeletionTask.class));
 
     return deletionServiceWithExpectedFiles;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index bc1b4b0..37fe77a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -120,6 +120,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.TestNonAggregatingLogHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
@@ -218,8 +220,10 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     // ensure filesystems were closed
     verify(logAggregationService).closeFileSystems(
         any(UserGroupInformation.class));
-    verify(delSrvc).delete(eq(user), eq((Path) null),
-      eq(new Path(app1LogDir.getAbsolutePath())));
+    List<Path> dirList = new ArrayList<>();
+    dirList.add(new Path(app1LogDir.toURI()));
+    verify(delSrvc, times(2)).delete(argThat(new FileDeletionMatcher(
+        delSrvc, user, null, dirList)));
     
     String containerIdStr = container11.toString();
     File containerLogDir = new File(app1LogDir, containerIdStr);
@@ -333,7 +337,9 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     logAggregationService.stop();
     delSrvc.stop();
     // Aggregated logs should not be deleted if not uploaded.
-    verify(delSrvc, times(0)).delete(user, null);
+    FileDeletionTask deletionTask = new FileDeletionTask(delSrvc, user, null,
+        null);
+    verify(delSrvc, times(0)).delete(deletionTask);
   }
 
   @Test
@@ -815,8 +821,9 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     assertEquals(0, logAggregationService.getNumAggregators());
     // local log dir shouldn't be deleted given log aggregation cannot
     // continue due to aggregated log dir creation failure on remoteFS.
-    verify(spyDelSrvc, never()).delete(eq(user), any(Path.class),
-        Mockito.<Path>anyVararg());
+    FileDeletionTask deletionTask = new FileDeletionTask(spyDelSrvc, user,
+        null, null);
+    verify(spyDelSrvc, never()).delete(deletionTask);
     verify(logAggregationService).closeFileSystems(
         any(UserGroupInformation.class));
     // make sure local log dir is not deleted in case log aggregation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
index ec3757e..7a4ea88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
@@ -22,12 +22,14 @@ import static org.junit.Assert.assertFalse;
 
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.argThat;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 import java.io.File;
@@ -36,6 +38,7 @@ import java.io.IOException;
 import java.io.NotSerializableException;
 import java.io.ObjectInputStream;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
@@ -66,6 +69,7 @@ import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
@@ -531,8 +535,8 @@ public class TestNonAggregatingLogHandler {
     boolean matched = false;
     while (!matched && System.currentTimeMillis() < verifyStartTime + timeout) {
       try {
-        verify(delService).delete(eq(user), (Path) eq(null),
-          Mockito.argThat(new DeletePathsMatcher(matchPaths)));
+        verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+            delService, user, null, Arrays.asList(matchPaths))));
         matched = true;
       } catch (WantedButNotInvoked e) {
         notInvokedException = e;




[08/50] [abbrv] hadoop git commit: HADOOP-14442. Owner support for ranger-wasb integration. Contributed by Varada Hemeswari

Posted by xg...@apache.org.
HADOOP-14442. Owner support for ranger-wasb integration. Contributed by Varada Hemeswari


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89bb8bfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89bb8bfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89bb8bfe

Branch: refs/heads/YARN-5734
Commit: 89bb8bfe582ba85566cede321b233bb642f1c675
Parents: bd6a217
Author: Mingliang Liu <li...@apache.org>
Authored: Fri May 26 17:52:56 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Fri May 26 17:54:00 2017 -0700

----------------------------------------------------------------------
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  48 +++++++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java      |  12 +-
 .../fs/azure/WasbAuthorizerInterface.java       |   3 +-
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  52 ++++++--
 .../TestNativeAzureFileSystemAuthorization.java |  90 ++++----------
 ...veAzureFileSystemAuthorizationWithOwner.java | 122 +++++++++++++++++++
 6 files changed, 247 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 0ba47ef..b61baab 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -200,7 +200,7 @@ public class NativeAzureFileSystem extends FileSystem {
       JsonNode oldFolderName = json.get("OldFolderName");
       JsonNode newFolderName = json.get("NewFolderName");
       if (oldFolderName == null || newFolderName == null) {
-    	  this.committed = false;
+        this.committed = false;
       } else {
         this.srcKey = oldFolderName.textValue();
         this.dstKey = newFolderName.textValue();
@@ -349,7 +349,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
       return contents;
     }
-    
+
     /**
      * This is an exact copy of org.codehaus.jettison.json.JSONObject.quote 
      * method.
@@ -639,7 +639,7 @@ public class NativeAzureFileSystem extends FileSystem {
     return "wasb";
   }
 
-  
+
   /**
    * <p>
    * A {@link FileSystem} for reading and writing files stored on <a
@@ -1441,12 +1441,14 @@ public class NativeAzureFileSystem extends FileSystem {
       requestingAccessForPath = requestingAccessForPath.makeQualified(getUri(), getWorkingDirectory());
       originalPath = originalPath.makeQualified(getUri(), getWorkingDirectory());
 
-      if (!this.authorizer.authorize(requestingAccessForPath.toString(), accessType.toString())) {
+      String owner = getOwnerForPath(requestingAccessForPath);
+
+      if (!this.authorizer.authorize(requestingAccessForPath.toString(), accessType.toString(), owner)) {
         throw new WasbAuthorizationException(operation
             + " operation for Path : " + originalPath.toString() + " not allowed");
       }
 
-    }
+   }
   }
 
   /**
@@ -3173,4 +3175,40 @@ public class NativeAzureFileSystem extends FileSystem {
     // Return to the caller with the randomized key.
     return randomizedKey;
   }
+
+  /*
+   * Helper method to retrieve owner information for a given path.
+   * The method returns an empty string if the file is not found or the
+   * metadata does not contain owner information.
+   */
+  @VisibleForTesting
+  public String getOwnerForPath(Path absolutePath) throws IOException {
+    String owner = "";
+    FileMetadata meta = null;
+    String key = pathToKey(absolutePath);
+    try {
+
+      meta = store.retrieveMetadata(key);
+
+      if (meta != null) {
+        owner = meta.getPermissionStatus().getUserName();
+        LOG.debug("Retrieved '{}' as owner for path - {}", owner, absolutePath);
+      } else {
+        // meta will be null if the file/folder does not exist
+        LOG.debug("Cannot find file/folder - '{}'. Returning owner as empty string", absolutePath);
+      }
+    } catch (IOException ex) {
+      Throwable innerException =
+          NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
+      boolean isFileNotFoundException = innerException instanceof StorageException
+          && NativeAzureFileSystemHelper.isFileNotFoundException(
+              (StorageException) innerException);
+
+      // should not throw when the exception is related to blob/container/file/folder not found
+      if (!isFileNotFoundException) {
+        String errorMsg = "Could not retrieve owner information for path - " + absolutePath;
+        LOG.error(errorMsg);
+        throw new IOException(errorMsg, ex);
+      }
+    }
+    return owner;
+  }
 }
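
In short, performAuthCheck now resolves the blob's stored owner and passes it
along with every authorization request. A minimal sketch of that flow, assuming
the hadoop-azure classes from this diff are on the classpath (the wrapper class
and method names here are illustrative, not part of the patch):

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
    import org.apache.hadoop.fs.azure.WasbAuthorizationException;
    import org.apache.hadoop.fs.azure.WasbAuthorizerInterface;

    public class OwnerAwareCheckSketch {
      // Mirrors the three steps the patch adds: qualify the path, look up the
      // owner from blob metadata, then hand the owner to authorize().
      static void check(NativeAzureFileSystem fs, WasbAuthorizerInterface authorizer,
          Path path, String accessType, String operation) throws IOException {
        Path qualified = path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
        String owner = fs.getOwnerForPath(qualified); // "" when the blob is absent
        if (!authorizer.authorize(qualified.toString(), accessType, owner)) {
          throw new WasbAuthorizationException(operation
              + " operation for Path : " + path.toString() + " not allowed");
        }
      }
    }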

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index ea08b2b..3c912d7 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -87,6 +87,12 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
   private static final String DELEGATION_TOKEN_QUERY_PARAM_NAME =
       "delegation";
 
+  /**
+   * Query parameter name for sending the owner of the specified resource {@value}
+   */
+  private static final String WASB_RESOURCE_OWNER_QUERY_PARAM_NAME =
+      "wasb_resource_owner";
+
   private WasbRemoteCallHelper remoteCallHelper = null;
   private String delegationToken;
   private boolean isSecurityEnabled;
@@ -119,7 +125,7 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
   }
 
   @Override
-  public boolean authorize(String wasbAbsolutePath, String accessType)
+  public boolean authorize(String wasbAbsolutePath, String accessType, String resourceOwner)
       throws WasbAuthorizationException, IOException {
 
       try {
@@ -140,6 +146,10 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
           uriBuilder.addParameter(DELEGATION_TOKEN_QUERY_PARAM_NAME,
               delegationToken);
         }
+        if (resourceOwner != null && StringUtils.isNotEmpty(resourceOwner)) {
+          uriBuilder.addParameter(WASB_RESOURCE_OWNER_QUERY_PARAM_NAME,
+              resourceOwner);
+        }
 
         String responseBody = null;
         UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
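
On the wire, the owner simply rides along as one more query parameter on the
remote authorization request. A short standalone sketch using Apache
HttpClient's URIBuilder (the same builder used above); the endpoint and the
other parameter names are made up for illustration, only wasb_resource_owner
comes from this patch:

    import java.net.URI;

    import org.apache.http.client.utils.URIBuilder;

    public class OwnerParamSketch {
      public static void main(String[] args) throws Exception {
        URIBuilder uriBuilder =
            new URIBuilder("http://localhost:8080/CHECK_AUTHORIZATION"); // hypothetical endpoint
        uriBuilder.addParameter("wasb_absolute_path", "/testDir/test.dat"); // illustrative name
        uriBuilder.addParameter("operation_type", "write");                // illustrative name
        // The new parameter added by this patch:
        uriBuilder.addParameter("wasb_resource_owner", "alice");
        URI uri = uriBuilder.build();
        System.out.println(uri);
      }
    }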

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbAuthorizerInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbAuthorizerInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbAuthorizerInterface.java
index 57d7516..af0e954 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbAuthorizerInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbAuthorizerInterface.java
@@ -43,10 +43,11 @@ public interface WasbAuthorizerInterface {
 
   * @param wasbAbsolutePath : Absolute WASB Path used for access.
    * @param accessType : Type of access
+   * @param owner : owner of the file/folder specified in the wasb path
    * @return : true - If access allowed false - If access is not allowed.
    * @throws WasbAuthorizationException - On authorization exceptions
    * @throws IOException - When not able to reach the authorizer
    */
-  boolean authorize(String wasbAbsolutePath, String accessType)
+  boolean authorize(String wasbAbsolutePath, String accessType, String owner)
       throws WasbAuthorizationException, IOException;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
index 0b3422c..90a6b51 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
@@ -22,7 +22,9 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.fs.Path;
 
 /**
@@ -32,8 +34,9 @@ import org.apache.hadoop.fs.Path;
 public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
 
   private Map<AuthorizationComponent, Boolean> authRules;
+  private boolean performOwnerMatch;
 
-  // The fully qualified URL to the root directory
+ // The fully qualified URL to the root directory
   private String qualifiedPrefixUrl;
 
   public MockWasbAuthorizerImpl(NativeAzureFileSystem fs) {
@@ -43,15 +46,22 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
 
   @Override
   public void init(Configuration conf) {
+    init(conf, false);
+  }
+
+  /*
+   * When matchOwner is true, authorization matches the resource owner against
+   * the current user's short name while evaluating auth rules.
+   */
+  public void init(Configuration conf, boolean matchOwner) {
     authRules = new HashMap<AuthorizationComponent, Boolean>();
+    this.performOwnerMatch = matchOwner;
   }
 
   public void addAuthRule(String wasbAbsolutePath,
       String accessType, boolean access) {
-
-    wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
-
-    AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
+    wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
+    AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
         ? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"), accessType)
         : new AuthorizationComponent(wasbAbsolutePath, accessType);
 
@@ -59,18 +69,40 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
   }
 
   @Override
-  public boolean authorize(String wasbAbsolutePath, String accessType)
+  public boolean authorize(String wasbAbsolutePath, String accessType, String owner)
       throws WasbAuthorizationException {
 
     if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
       return true;
     }
 
+    String currentUserShortName = "";
+    if (this.performOwnerMatch) {
+      try {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        currentUserShortName = ugi.getShortUserName();
+      } catch (Exception e) {
+        // no-op: leave currentUserShortName empty
+      }
+    }
+
+    // In case of root ("/"), no owner match happens because the owner is returned as an empty string.
+    // We force the owner match here purely for test purposes, so that all operations work seamlessly with an owner.
+    if (this.performOwnerMatch
+      && StringUtils.equalsIgnoreCase(wasbAbsolutePath, qualifiedPrefixUrl + "/")) {
+      owner = currentUserShortName;
+    }
+
+    boolean shouldEvaluateOwnerAccess = owner != null && !owner.isEmpty()
+      && this.performOwnerMatch;
+
+    boolean isOwnerMatch = StringUtils.equalsIgnoreCase(currentUserShortName, owner);
+
     AuthorizationComponent component =
         new AuthorizationComponent(wasbAbsolutePath, accessType);
 
     if (authRules.containsKey(component)) {
-      return authRules.get(component);
+      return shouldEvaluateOwnerAccess ? isOwnerMatch && authRules.get(component) : authRules.get(component);
     } else {
       // Regex-pattern match if we don't have a straight match
       for (Map.Entry<AuthorizationComponent, Boolean> entry : authRules.entrySet()) {
@@ -79,12 +111,16 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
         String keyAccess = key.getAccessType();
 
         if (keyPath.endsWith("*") && Pattern.matches(keyPath, wasbAbsolutePath) && keyAccess.equals(accessType)) {
-          return entry.getValue();
+          return shouldEvaluateOwnerAccess ? isOwnerMatch && entry.getValue() : entry.getValue();
         }
       }
       return false;
     }
   }
+
+  public void deleteAllAuthRules() {
+    authRules.clear();
+  }
 }
 
 class AuthorizationComponent {
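
Boiled down, the mock's new rule evaluation is: when owner matching is enabled
and an owner is present, a rule grants access only if the current short user
name matches that owner. A condensed restatement of that decision (a sketch
mirroring the logic above, not the class itself):

    // ruleAllows is the stored value of the matching auth rule.
    static boolean decide(boolean ruleAllows, boolean performOwnerMatch,
        String currentUserShortName, String owner) {
      boolean evaluateOwner =
          performOwnerMatch && owner != null && !owner.isEmpty();
      boolean ownerMatches = owner != null
          && owner.equalsIgnoreCase(currentUserShortName);
      return evaluateOwner ? ownerMatches && ruleAllows : ruleAllows;
    }

    // e.g. decide(true, true,  "alice", "bob")   -> false
    //      decide(true, true,  "alice", "alice") -> true
    //      decide(true, false, "alice", "bob")   -> true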

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index f7a2eb7..a0276cb5 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -29,6 +29,7 @@ import org.junit.Rule;
 import org.junit.Test;
 
 import org.junit.rules.ExpectedException;
+import com.google.common.annotations.VisibleForTesting;
 
 import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
 
@@ -38,6 +39,9 @@ import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECU
 public class TestNativeAzureFileSystemAuthorization
   extends AbstractWasbTestBase {
 
+  @VisibleForTesting
+  protected MockWasbAuthorizerImpl authorizer;
+
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
     Configuration conf = new Configuration();
@@ -54,9 +58,8 @@ public class TestNativeAzureFileSystemAuthorization
     Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
         useSecureMode && useAuthorization);
 
-    Assume.assumeTrue(
-        useSecureMode && useAuthorization
-    );
+    authorizer = new MockWasbAuthorizerImpl(fs);
+    authorizer.init(null);
   }
 
 
@@ -66,12 +69,12 @@ public class TestNativeAzureFileSystemAuthorization
   /**
    * Setup up permissions to allow a recursive delete for cleanup purposes.
    */
-  private void allowRecursiveDelete(NativeAzureFileSystem fs, MockWasbAuthorizerImpl authorizer, String path) {
+  protected void allowRecursiveDelete(NativeAzureFileSystem fs, String path) {
 
     int index = path.lastIndexOf('/');
     String parent = (index == 0) ? "/" : path.substring(0, index);
 
-    authorizer.init(null);
+    authorizer.deleteAllAuthRules();
     authorizer.addAuthRule(parent, WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule((path.endsWith("*") ? path : path+"*"), WasbAuthorizationOperations.WRITE.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -80,7 +83,7 @@ public class TestNativeAzureFileSystemAuthorization
   /**
    * Setup the expected exception class, and exception message that the test is supposed to fail with
    */
-  private void setExpectedFailureMessage(String operation, Path path) {
+  protected void setExpectedFailureMessage(String operation, Path path) {
     expectedEx.expect(WasbAuthorizationException.class);
     expectedEx.expectMessage(String.format("%s operation for Path : %s not allowed",
         operation, path.makeQualified(fs.getUri(), fs.getWorkingDirectory())));
@@ -98,8 +101,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -126,8 +127,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path parentDir = new Path("/testCreateAccessCheckPositive/1/2/3");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -137,7 +136,7 @@ public class TestNativeAzureFileSystemAuthorization
       ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath);
     }
     finally {
-      allowRecursiveDelete(fs, authorizer, "/testCreateAccessCheckPositive");
+      allowRecursiveDelete(fs, "/testCreateAccessCheckPositive");
       fs.delete(new Path("/testCreateAccessCheckPositive"), true);
     }
   }
@@ -156,8 +155,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("create", testPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -186,8 +183,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
@@ -219,8 +214,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("create", testPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), false);
     fs.updateWasbAuthorizer(authorizer);
 
@@ -229,7 +222,7 @@ public class TestNativeAzureFileSystemAuthorization
     }
     finally {
       /* Provide permissions to cleanup in case the file got created */
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -245,8 +238,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path intermediateFolders = new Path(parentDir, "1/2/3/");
     Path testPath = new Path(intermediateFolders, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -256,7 +247,7 @@ public class TestNativeAzureFileSystemAuthorization
       fs.listStatus(testPath);
     }
     finally {
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -274,8 +265,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("liststatus", testPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), false);
     fs.updateWasbAuthorizer(authorizer);
@@ -285,7 +274,7 @@ public class TestNativeAzureFileSystemAuthorization
       fs.listStatus(testPath);
     }
     finally {
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -301,8 +290,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path srcPath = new Path(parentDir, "test1.dat");
     Path dstPath = new Path(parentDir, "test2.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parentDir */
     authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true); /* for rename */
     authorizer.addAuthRule(srcPath.toString(), WasbAuthorizationOperations.READ.toString(), true); /* for exists */
@@ -317,7 +304,7 @@ public class TestNativeAzureFileSystemAuthorization
       ContractTestUtils.assertPathDoesNotExist(fs, "sourcePath exists after rename!", srcPath);
     }
     finally {
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -335,8 +322,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("rename", srcPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dir */
     authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), false);
     authorizer.addAuthRule(srcPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
@@ -351,7 +336,7 @@ public class TestNativeAzureFileSystemAuthorization
     } finally {
       ContractTestUtils.assertPathExists(fs, "sourcePath does not exist after rename failure!", srcPath);
 
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -370,8 +355,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("rename", dstPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dir */
     authorizer.addAuthRule(parentSrcDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(parentDstDir.toString(), WasbAuthorizationOperations.WRITE.toString(), false);
@@ -386,7 +369,7 @@ public class TestNativeAzureFileSystemAuthorization
       ContractTestUtils.assertPathDoesNotExist(fs, "destPath does not exist", dstPath);
     } finally {
       ContractTestUtils.assertPathExists(fs, "sourcePath does not exist after rename !", srcPath);
-      allowRecursiveDelete(fs, authorizer, parentSrcDir.toString());
+      allowRecursiveDelete(fs, parentSrcDir.toString());
       fs.delete(parentSrcDir, true);
     }
   }
@@ -403,8 +386,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path parentDstDir = new Path("/testRenameAccessCheckPositiveDst");
     Path dstPath = new Path(parentDstDir, "test2.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dirs */
     authorizer.addAuthRule(parentSrcDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(parentDstDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
@@ -420,10 +401,10 @@ public class TestNativeAzureFileSystemAuthorization
       ContractTestUtils.assertPathDoesNotExist(fs, "sourcePath does not exist", srcPath);
       ContractTestUtils.assertPathExists(fs, "destPath does not exist", dstPath);
     } finally {
-      allowRecursiveDelete(fs, authorizer, parentSrcDir.toString());
+      allowRecursiveDelete(fs, parentSrcDir.toString());
       fs.delete(parentSrcDir, true);
 
-      allowRecursiveDelete(fs, authorizer, parentDstDir.toString());
+      allowRecursiveDelete(fs, parentDstDir.toString());
       fs.delete(parentDstDir, true);
     }
   }
@@ -438,8 +419,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path parentDir = new Path("/testReadAccessCheckPositive");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -463,7 +442,7 @@ public class TestNativeAzureFileSystemAuthorization
       if(inputStream != null) {
         inputStream.close();
       }
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -481,8 +460,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("read", testPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), false);
     fs.updateWasbAuthorizer(authorizer);
@@ -505,7 +482,7 @@ public class TestNativeAzureFileSystemAuthorization
       if (inputStream != null) {
         inputStream.close();
       }
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -520,8 +497,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path parentDir = new Path("/");
     Path testPath = new Path(parentDir, "test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -547,8 +522,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("delete", testPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -558,7 +531,7 @@ public class TestNativeAzureFileSystemAuthorization
 
 
       /* Remove permissions for delete to force failure */
-      authorizer.init(null);
+      authorizer.deleteAllAuthRules();
       authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), false);
       fs.updateWasbAuthorizer(authorizer);
 
@@ -566,7 +539,7 @@ public class TestNativeAzureFileSystemAuthorization
     }
     finally {
       /* Restore permissions to force a successful delete */
-      authorizer.init(null);
+      authorizer.deleteAllAuthRules();
       authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
       authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
       fs.updateWasbAuthorizer(authorizer);
@@ -587,8 +560,6 @@ public class TestNativeAzureFileSystemAuthorization
     Path parentDir = new Path("/testDeleteIntermediateFolder");
     Path testPath = new Path(parentDir, "1/2/test.dat");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); // for create and delete
     authorizer.addAuthRule("/testDeleteIntermediateFolder*",
         WasbAuthorizationOperations.WRITE.toString(), true); // for recursive delete
@@ -602,7 +573,7 @@ public class TestNativeAzureFileSystemAuthorization
       ContractTestUtils.assertPathDoesNotExist(fs, "testPath exists after deletion!", parentDir);
     }
     finally {
-      allowRecursiveDelete(fs, authorizer, parentDir.toString());
+      allowRecursiveDelete(fs, parentDir.toString());
       fs.delete(parentDir, true);
     }
   }
@@ -616,8 +587,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     Path testPath = new Path("/");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
 
@@ -635,8 +604,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("getFileStatus", testPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.READ.toString(), false);
     fs.updateWasbAuthorizer(authorizer);
 
@@ -652,8 +619,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     Path testPath = new Path("/testMkdirsAccessCheckPositive/1/2/3");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -663,7 +628,7 @@ public class TestNativeAzureFileSystemAuthorization
       ContractTestUtils.assertIsDirectory(fs, testPath);
     }
     finally {
-      allowRecursiveDelete(fs, authorizer, "/testMkdirsAccessCheckPositive");
+      allowRecursiveDelete(fs, "/testMkdirsAccessCheckPositive");
       fs.delete(new Path("/testMkdirsAccessCheckPositive"), true);
     }
   }
@@ -679,8 +644,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     setExpectedFailureMessage("mkdirs", testPath);
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), false);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
@@ -690,12 +653,11 @@ public class TestNativeAzureFileSystemAuthorization
       ContractTestUtils.assertPathDoesNotExist(fs, "testPath was not created", testPath);
     }
     finally {
-      allowRecursiveDelete(fs, authorizer, "/testMkdirsAccessCheckNegative");
+      allowRecursiveDelete(fs, "/testMkdirsAccessCheckNegative");
       fs.delete(new Path("/testMkdirsAccessCheckNegative"), true);
     }
   }
 
-
   /**
    * Positive test triple slash format (wasb:///) access check
    * @throws Throwable
@@ -705,8 +667,6 @@ public class TestNativeAzureFileSystemAuthorization
 
     Path testPath = new Path("/");
 
-    MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
-    authorizer.init(null);
     authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
     fs.updateWasbAuthorizer(authorizer);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bb8bfe/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
new file mode 100644
index 0000000..3329e67
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+import org.junit.Before;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test class that runs wasb authorization tests with owner check enabled.
+ */
+public class TestNativeAzureFileSystemAuthorizationWithOwner
+  extends TestNativeAzureFileSystemAuthorization {
+
+  @Before
+  public void beforeMethod() {
+    super.beforeMethod();
+    authorizer.init(null, true);
+  }
+
+  /**
+   * Test case when owner matches current user
+   */
+  @Test
+  public void testOwnerPermissionPositive() throws Throwable {
+
+    Path parentDir = new Path("/testOwnerPermissionPositive");
+    Path testPath = new Path(parentDir, "test.data");
+
+    authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
+    authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
+    authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
+    // additional rule used for assertPathExists
+    authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.READ.toString(), true);
+    fs.updateWasbAuthorizer(authorizer);
+
+    try {
+      // creates parentDir with owner as current user
+      fs.mkdirs(parentDir);
+      ContractTestUtils.assertPathExists(fs, "parentDir does not exist", parentDir);
+
+      fs.create(testPath);
+      fs.getFileStatus(testPath);
+      ContractTestUtils.assertPathExists(fs, "testPath does not exist", testPath);
+
+    } finally {
+      allowRecursiveDelete(fs, parentDir.toString());
+      fs.delete(parentDir, true);
+    }
+  }
+
+  /**
+   * Negative test case for owner does not match current user
+   */
+  @Test
+  public void testOwnerPermissionNegative() throws Throwable {
+    expectedEx.expect(WasbAuthorizationException.class);
+
+    Path parentDir = new Path("/testOwnerPermissionNegative");
+    Path childDir = new Path(parentDir, "childDir");
+
+    setExpectedFailureMessage("mkdirs", childDir);
+
+    authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
+    authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
+
+    fs.updateWasbAuthorizer(authorizer);
+
+    try {
+      fs.mkdirs(parentDir);
+      UserGroupInformation ugiSuperUser = UserGroupInformation.createUserForTesting(
+          "testuser", new String[] {});
+
+      ugiSuperUser.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          fs.mkdirs(childDir);
+          return null;
+        }
+      });
+
+    } finally {
+      allowRecursiveDelete(fs, parentDir.toString());
+      fs.delete(parentDir, true);
+    }
+  }
+
+  /**
+   * Test to verify that retrieving owner information does not
+   * throw when file/folder does not exist
+   */
+  @Test
+  public void testRetrievingOwnerDoesNotFailWhenFileDoesNotExist() throws Throwable {
+
+    Path testDirectory = new Path("/testDirectory123454565");
+
+    String owner = fs.getOwnerForPath(testDirectory);
+    assertEquals("", owner);
+  }
+}
+


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/50] [abbrv] hadoop git commit: HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar B.

Posted by xg...@apache.org.
HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1543d0f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1543d0f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1543d0f5

Branch: refs/heads/YARN-5734
Commit: 1543d0f5be6a02ad00e7a33e35d78af8516043e3
Parents: cbfed0e
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed May 31 10:55:03 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed May 31 10:55:03 2017 -0500

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/io/IOUtils.java | 55 +++++++++++++++++++-
 .../hdfs/server/datanode/BlockReceiver.java     |  9 +++-
 .../hdfs/server/datanode/FileIoProvider.java    | 19 ++++++-
 .../hdfs/server/datanode/LocalReplica.java      | 13 +++++
 .../server/datanode/fsdataset/FsDatasetSpi.java |  4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 31 ++++++++---
 .../server/datanode/SimulatedFSDataset.java     |  3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |  6 ++-
 .../server/datanode/TestSimulatedFSDataset.java |  4 +-
 .../extdataset/ExternalDatasetImpl.java         |  3 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |  2 +-
 11 files changed, 130 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 0d2e797..ee7264b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -27,6 +27,7 @@ import java.nio.file.DirectoryStream;
 import java.nio.file.DirectoryIteratorException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -36,7 +37,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.util.Shell;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -357,4 +358,56 @@ public class IOUtils {
     }
     return list;
   }
+
+  /**
+   * Ensure that any writes to the given file are written to the storage device
+   * that contains it. This method opens a channel on the given File and closes
+   * it once the sync is done.<br>
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param fileToSync the file to fsync
+   */
+  public static void fsync(File fileToSync) throws IOException {
+    if (!fileToSync.exists()) {
+      throw new FileNotFoundException(
+          "File/Directory " + fileToSync.getAbsolutePath() + " does not exist");
+    }
+    boolean isDir = fileToSync.isDirectory();
+    // If the file is a directory we have to open read-only, for regular files
+    // we must open r/w for the fsync to have an effect. See
+    // http://blog.httrack.com/blog/2013/11/15/
+    // everything-you-always-wanted-to-know-about-fsync/
+    try(FileChannel channel = FileChannel.open(fileToSync.toPath(),
+        isDir ? StandardOpenOption.READ : StandardOpenOption.WRITE)){
+      fsync(channel, isDir);
+    }
+  }
+
+  /**
+   * Ensure that any writes to the given channel are written to the storage
+   * device that contains it. The caller supplies the channel; it is not
+   * closed by this method.
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param channel Channel to sync
+   * @param isDir if true, the given file is a directory (the channel should be
+   *          opened for read, and IOExceptions are ignored, because not all
+   *          file systems and operating systems allow fsyncing a directory)
+   * @throws IOException
+   */
+  public static void fsync(FileChannel channel, boolean isDir)
+      throws IOException {
+    try {
+      channel.force(true);
+    } catch (IOException ioe) {
+      if (isDir) {
+        assert !(Shell.LINUX
+            || Shell.MAC) : "On Linux and MacOSX fsyncing a directory"
+                + " should not throw IOException, we just don't want to rely"
+                + " on that in production (undocumented)" + ". Got: " + ioe;
+        // Ignore exception if it is a directory
+        return;
+      }
+      // Throw original exception
+      throw ioe;
+    }
+  }
 }
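
The directory variant is the subtle part: after a rename, the new directory
entry is only durable once the parent directory itself has been fsynced, and a
directory must be opened read-only for that. A standalone, JDK-only
illustration of the same pattern (class and method names are hypothetical):

    import java.io.File;
    import java.nio.channels.FileChannel;
    import java.nio.file.StandardOpenOption;

    public class DirFsyncSketch {
      // Open the directory read-only and force its metadata to stable storage.
      // The IOException is swallowed because some file systems and operating
      // systems do not support fsync on a directory, as noted above.
      static void fsyncDirectory(File dir) {
        try (FileChannel channel =
            FileChannel.open(dir.toPath(), StandardOpenOption.READ)) {
          channel.force(true);
        } catch (java.io.IOException ignored) {
          // best effort on platforms that cannot fsync a directory
        }
      }
    }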

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index a0e646d..0077700 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -125,6 +125,7 @@ class BlockReceiver implements Closeable {
   private boolean isPenultimateNode = false;
 
   private boolean syncOnClose;
+  private volatile boolean dirSyncOnFinalize;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
   private ReplicaHandler replicaHandler;
@@ -547,6 +548,9 @@ class BlockReceiver implements Closeable {
     // avoid double sync'ing on close
     if (syncBlock && lastPacketInBlock) {
       this.syncOnClose = false;
+      // sync the directory on finalize irrespective of the syncOnClose
+      // config, since a sync was explicitly requested.
+      this.dirSyncOnFinalize = true;
     }
 
     // update received bytes
@@ -937,6 +941,7 @@ class BlockReceiver implements Closeable {
       boolean isReplaceBlock) throws IOException {
 
     syncOnClose = datanode.getDnConf().syncOnClose;
+    dirSyncOnFinalize = syncOnClose;
     boolean responderClosed = false;
     mirrorOut = mirrOut;
     mirrorAddr = mirrAddr;
@@ -979,7 +984,7 @@ class BlockReceiver implements Closeable {
           } else {
            // for isDatanode or TRANSFER_FINALIZED
             // Finalize the block.
-            datanode.data.finalizeBlock(block);
+            datanode.data.finalizeBlock(block, dirSyncOnFinalize);
           }
         }
         datanode.metrics.incrBlocksWritten();
@@ -1502,7 +1507,7 @@ class BlockReceiver implements Closeable {
         BlockReceiver.this.close();
         endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
         block.setNumBytes(replicaInfo.getNumBytes());
-        datanode.data.finalizeBlock(block);
+        datanode.data.finalizeBlock(block, dirSyncOnFinalize);
       }
 
       if (pinning) {
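
The condition for the new directory sync on finalize is worth spelling out: it
fires either when the datanode's dfs.datanode.synconclose setting is enabled,
or when the client requested a sync on the block's last packet. A one-method
distillation of the flag derived above (names are illustrative):

    // dirSyncOnFinalize as derived in BlockReceiver above: initialized from
    // the syncOnClose config, then forced on when the client asks for a sync
    // on the last packet of the block.
    static boolean dirSyncOnFinalize(boolean syncOnCloseConf,
        boolean syncBlockRequested, boolean lastPacketInBlock) {
      return syncOnCloseConf || (syncBlockRequested && lastPacketInBlock);
    }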

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index 694eadd..b8e08d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -149,7 +149,24 @@ public class FileIoProvider {
     final long begin = profilingEventHook.beforeFileIo(volume, SYNC, 0);
     try {
       faultInjectorEventHook.beforeFileIo(volume, SYNC, 0);
-      fos.getChannel().force(true);
+      IOUtils.fsync(fos.getChannel(), false);
+      profilingEventHook.afterFileIo(volume, SYNC, begin, 0);
+    } catch (Exception e) {
+      onFailure(volume, begin);
+      throw e;
+    }
+  }
+
+  /**
+   * Sync the given directory changes to durable device.
+   * @throws IOException
+   */
+  public void dirSync(@Nullable FsVolumeSpi volume, File dir)
+      throws IOException {
+    final long begin = profilingEventHook.beforeFileIo(volume, SYNC, 0);
+    try {
+      faultInjectorEventHook.beforeFileIo(volume, SYNC, 0);
+      IOUtils.fsync(dir);
       profilingEventHook.afterFileIo(volume, SYNC, begin, 0);
     } catch (Exception e) {
       onFailure(volume, begin);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index 1d46ddd..2c5af11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -510,4 +510,17 @@ abstract public class LocalReplica extends ReplicaInfo {
       metaRAF.write(b, 0, checksumsize);
     }
   }
+
+  /**
+   * Sync the parent directory changes to durable device.
+   * @throws IOException
+   */
+  public void fsyncDirectory() throws IOException {
+    File dir = getDir();
+    try {
+      getFileIoProvider().dirSync(getVolume(), dir);
+    } catch (IOException e) {
+      throw new IOException("Failed to sync " + dir, e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index fd3af5d..7be42e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -394,12 +394,14 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * Finalizes the block previously opened for writing using writeToBlock.
    * The block size is what is in the parameter b and it must match the amount
    *  of data written
+   * @param block Block to be finalized
+   * @param fsyncDir whether to sync the directory changes to durable device.
    * @throws IOException
   * @throws ReplicaNotFoundException if the replica cannot be found when the
   * block is being finalized. For instance, the block resides on an HDFS volume
    * that has been removed.
    */
-  void finalizeBlock(ExtendedBlock b) throws IOException;
+  void finalizeBlock(ExtendedBlock b, boolean fsyncDir) throws IOException;
 
   /**
    * Unfinalizes the block previously opened for writing using writeToBlock.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index eb4455b..11835a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -987,7 +989,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         replicaInfo, smallBufferSize, conf);
 
     // Finalize the copied files
-    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
+    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo,
+        false);
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       // Increment numBlocks here as this block moved without knowing to BPS
       FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
@@ -1290,7 +1293,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           replicaInfo.bumpReplicaGS(newGS);
           // finalize the replica if RBW
           if (replicaInfo.getState() == ReplicaState.RBW) {
-            finalizeReplica(b.getBlockPoolId(), replicaInfo);
+            finalizeReplica(b.getBlockPoolId(), replicaInfo, false);
           }
           return replicaInfo;
         }
@@ -1604,7 +1607,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    * Complete the block write!
    */
   @Override // FsDatasetSpi
-  public void finalizeBlock(ExtendedBlock b) throws IOException {
+  public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
+      throws IOException {
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       if (Thread.interrupted()) {
         // Don't allow data modifications from interrupted threads
@@ -1616,12 +1620,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         // been opened for append but never modified
         return;
       }
-      finalizeReplica(b.getBlockPoolId(), replicaInfo);
+      finalizeReplica(b.getBlockPoolId(), replicaInfo, fsyncDir);
     }
   }
 
   private ReplicaInfo finalizeReplica(String bpid,
-      ReplicaInfo replicaInfo) throws IOException {
+      ReplicaInfo replicaInfo, boolean fsyncDir) throws IOException {
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       ReplicaInfo newReplicaInfo = null;
       if (replicaInfo.getState() == ReplicaState.RUR &&
@@ -1636,6 +1640,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
         newReplicaInfo = v.addFinalizedBlock(
             bpid, replicaInfo, replicaInfo, replicaInfo.getBytesReserved());
+        /*
+         * Sync the directory after rename from tmp/rbw to Finalized if
+         * configured. Though rename should be an atomic operation, syncing on
+         * both dest and src directories is done because IOUtils.fsync() calls
+         * the directory's channel sync, not the journal itself.
+         */
+        if (fsyncDir && newReplicaInfo instanceof FinalizedReplica
+            && replicaInfo instanceof LocalReplica) {
+          FinalizedReplica finalizedReplica = (FinalizedReplica) newReplicaInfo;
+          finalizedReplica.fsyncDirectory();
+          LocalReplica localReplica = (LocalReplica) replicaInfo;
+          localReplica.fsyncDirectory();
+        }
         if (v.isTransientStorage()) {
           releaseLockedMemory(
               replicaInfo.getOriginalBytesReserved()
@@ -2601,11 +2618,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
         newReplicaInfo.setNumBytes(newlength);
         volumeMap.add(bpid, newReplicaInfo.getReplicaInfo());
-        finalizeReplica(bpid, newReplicaInfo.getReplicaInfo());
+        finalizeReplica(bpid, newReplicaInfo.getReplicaInfo(), false);
       }
     }
     // finalize the block
-    return finalizeReplica(bpid, rur);
+    return finalizeReplica(bpid, rur, false);
   }
 
   @Override // FsDatasetSpi
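
The comment above is the crux of the whole change: the rename from the rbw
directory into finalized is atomic, but it does not survive a power failure
until both parent directories are fsynced. A JDK-only sketch of the pattern,
reusing the hypothetical DirFsyncSketch helper shown after the IOUtils diff
above (all names illustrative):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    public class DurableRenameSketch {
      static void durableMove(File src, File dst) throws IOException {
        // Atomic on POSIX, but durable only after the directory entries
        // themselves hit stable storage.
        Files.move(src.toPath(), dst.toPath(), StandardCopyOption.ATOMIC_MOVE);
        DirFsyncSketch.fsyncDirectory(dst.getParentFile());
        DirFsyncSketch.fsyncDirectory(src.getParentFile());
      }
    }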

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index afa7a82..212f953 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -673,7 +673,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override // FsDatasetSpi
-  public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
+  public synchronized void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
+      throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index b308ca9..5d4ac1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -802,10 +802,12 @@ public class TestDataNodeHotSwapVolumes {
             // Bypass the argument to FsDatasetImpl#finalizeBlock to verify that
             // the block is not removed, since the volume reference should not
             // be released at this point.
-            data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
+            data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0],
+              (boolean) invocation.getArguments()[1]);
             return null;
           }
-        }).when(dn.data).finalizeBlock(any(ExtendedBlock.class));
+        }).when(dn.data).finalizeBlock(any(ExtendedBlock.class),
+            Mockito.anyBoolean());
 
     final CyclicBarrier barrier = new CyclicBarrier(2);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index 469e249b..4775fc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -96,7 +96,7 @@ public class TestSimulatedFSDataset {
         out.close();
       }
       b.setNumBytes(blockIdToLen(i));
-      fsdataset.finalizeBlock(b);
+      fsdataset.finalizeBlock(b, false);
       assertEquals(blockIdToLen(i), fsdataset.getLength(b));
     }
     return bytesAdded;
@@ -295,7 +295,7 @@ public class TestSimulatedFSDataset {
     }
     
     try {
-      fsdataset.finalizeBlock(b);
+      fsdataset.finalizeBlock(b, false);
       assertTrue("Expected an IO exception", false);
     } catch (IOException e) {
       // ok - as expected

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index d14bd72..13502d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -180,7 +180,8 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void finalizeBlock(ExtendedBlock b) throws IOException {
+  public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
+      throws IOException {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1543d0f5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 3293561..2a3bf79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -582,7 +582,7 @@ public class TestFsDatasetImpl {
           // Lets wait for the other thread finish getting block report
           blockReportReceivedLatch.await();
 
-          dataset.finalizeBlock(eb);
+          dataset.finalizeBlock(eb, false);
           LOG.info("FinalizeBlock finished");
         } catch (Exception e) {
           LOG.warn("Exception caught. This should not affect the test", e);
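The test updates above all follow from one signature change: finalizeBlock gained a boolean fsyncDir parameter, so every caller and every Mockito stub must supply the extra argument together, or the stub silently stops matching. Below is a minimal sketch of that stub-widening pattern; the Dataset and Block classes are hypothetical stand-ins for FsDatasetSpi and ExtendedBlock, not the real HDFS types.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

public class StubWideningSketch {
  // Hypothetical stand-ins for ExtendedBlock and FsDatasetSpi.
  static class Block {}
  static class Dataset {
    void finalizeBlock(Block b, boolean fsyncDir) {
      // Finalize the replica; optionally fsync its directory for durability.
    }
  }

  public static void main(String[] args) {
    Dataset real = new Dataset();
    Dataset data = spy(real);
    // The forwarded arguments and the matcher list must grow together when
    // the method gains a parameter, mirroring the diff above.
    doAnswer(invocation -> {
      real.finalizeBlock((Block) invocation.getArguments()[0],
          (boolean) invocation.getArguments()[1]);
      return null;
    }).when(data).finalizeBlock(any(Block.class), anyBoolean());

    data.finalizeBlock(new Block(), false);  // routed through the answer
  }
}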




[41/50] [abbrv] hadoop git commit: HADOOP-14440. Add metrics for connections dropped. Contributed by Eric Badger.

Posted by xg...@apache.org.
HADOOP-14440. Add metrics for connections dropped. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abdd609e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abdd609e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abdd609e

Branch: refs/heads/YARN-5734
Commit: abdd609e51a80388493417126c3bc9b1badc0ac1
Parents: 46f7e91
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 6 00:21:03 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Jun 6 00:21:03 2017 +0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/ipc/Server.java | 20 +++++++++++++++++++-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |  4 ++++
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 +
 .../java/org/apache/hadoop/ipc/TestIPC.java     |  8 +++++---
 4 files changed, 29 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abdd609e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 3ea5a24..f3b9a82 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -64,6 +64,7 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
@@ -1220,6 +1221,7 @@ public abstract class Server {
           if (channel.isOpen()) {
             IOUtils.cleanup(null, channel);
           }
+          connectionManager.droppedConnections.getAndIncrement();
           continue;
         }
         key.attach(c);  // so closeCurrentConnection can get the object
@@ -3161,6 +3163,16 @@ public abstract class Server {
   }
 
   /**
+   * The number of RPC connections dropped due to
+   * too many connections.
+   * @return the number of dropped rpc connections
+   */
+  public long getNumDroppedConnections() {
+    return connectionManager.getDroppedConnections();
+
+  }
+
+  /**
    * The number of rpc calls in the queue.
    * @return The number of rpc calls in the queue.
    */
@@ -3277,7 +3289,8 @@ public abstract class Server {
   }
   
   private class ConnectionManager {
-    final private AtomicInteger count = new AtomicInteger();    
+    final private AtomicInteger count = new AtomicInteger();
+    final private AtomicLong droppedConnections = new AtomicLong();
     final private Set<Connection> connections;
     /* Map to maintain the statistics per User */
     final private Map<String, Integer> userToConnectionsMap;
@@ -3364,6 +3377,11 @@ public abstract class Server {
       return userToConnectionsMap;
     }
 
+
+    long getDroppedConnections() {
+      return droppedConnections.get();
+    }
+
     int size() {
       return count.get();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abdd609e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
index e5dde10..8ce1379 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
@@ -121,6 +121,10 @@ public class RpcMetrics {
     return server.getCallQueueLen();
   }
 
+  @Metric("Number of dropped connections") public long numDroppedConnections() {
+    return server.getNumDroppedConnections();
+  }
+
   // Public instrumentation methods that could be extracted to an
   // abstract class if we decide to do custom instrumentation classes a la
   // JobTrackerInstrumentation. The methods with //@Override comment are

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abdd609e/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index a14c86d..4b89bc2 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -79,6 +79,7 @@ Each metrics record contains tags such as Hostname and port (number to which ser
 | `RpcAuthorizationSuccesses` | Total number of authorization successes |
 | `NumOpenConnections` | Current number of open connections |
 | `CallQueueLength` | Current length of the call queue |
+| `numDroppedConnections` | Total number of dropped connections |
 | `rpcQueueTime`*num*`sNumOps` | Shows total number of RPC calls (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
 | `rpcQueueTime`*num*`s50thPercentileLatency` | Shows the 50th percentile of RPC queue time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
 | `rpcQueueTime`*num*`s75thPercentileLatency` | Shows the 75th percentile of RPC queue time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abdd609e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 4198e40..611000d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -1339,7 +1339,7 @@ public class TestIPC {
 
   @Test
   public void testMaxConnections() throws Exception {
-    conf.setInt("ipc.server.max.connections", 5);
+    conf.setInt("ipc.server.max.connections", 6);
     Server server = null;
     Thread connectors[] = new Thread[10];
 
@@ -1374,8 +1374,10 @@ public class TestIPC {
       }
 
       Thread.sleep(1000);
-      // server should only accept up to 5 connections
-      assertEquals(5, server.getNumOpenConnections());
+      // server should only accept up to 6 connections
+      assertEquals(6, server.getNumOpenConnections());
+      // server should drop the other 4 connections
+      assertEquals(4, server.getNumDroppedConnections());
 
       for (int i = 0; i < 10; i++) {
         connectors[i].join();
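The moving parts in this commit are small: the accept loop bumps an AtomicLong in ConnectionManager whenever it refuses a channel over the connection cap, Server#getNumDroppedConnections exposes the count, and RpcMetrics publishes it as a gauge. A self-contained sketch of that counter-plus-gauge shape follows; the class and method names here are illustrative, not the actual Hadoop ones.

import java.util.concurrent.atomic.AtomicLong;

public class DroppedConnectionSketch {
  private final AtomicLong droppedConnections = new AtomicLong();
  private final int maxConnections;
  private int openConnections;

  DroppedConnectionSketch(int maxConnections) {
    this.maxConnections = maxConnections;
  }

  // Called from the accept loop for each incoming channel, mirroring the
  // Server change: count the drop, then refuse the connection.
  synchronized boolean accept() {
    if (maxConnections > 0 && openConnections >= maxConnections) {
      droppedConnections.getAndIncrement();
      return false;
    }
    openConnections++;
    return true;
  }

  // Read-only view for a metrics gauge, the role Server#getNumDroppedConnections
  // plays for RpcMetrics#numDroppedConnections above.
  long getNumDroppedConnections() {
    return droppedConnections.get();
  }

  public static void main(String[] args) {
    DroppedConnectionSketch server = new DroppedConnectionSketch(6);
    for (int i = 0; i < 10; i++) {
      server.accept();
    }
    // Ten attempts against a cap of six: four drops, as the test asserts.
    System.out.println(server.getNumDroppedConnections());  // 4
  }
}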




[06/50] [abbrv] hadoop git commit: Update 3.0.0-alpha3 changes, release notes, jdiff.

Posted by xg...@apache.org.
Update 3.0.0-alpha3 changes, release notes, jdiff.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cd612ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cd612ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cd612ba

Branch: refs/heads/YARN-5734
Commit: 2cd612ba8e3b84ddf41acf7b1beb0a4757a2465b
Parents: 16ad896
Author: Andrew Wang <wa...@apache.org>
Authored: Fri May 26 14:14:38 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri May 26 14:14:38 2017 -0700

----------------------------------------------------------------------
 .../3.0.0-alpha3/CHANGES.3.0.0-alpha3.md        |  71 ++++
 .../3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md   |  22 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml   | 326 +++++++++++++++++++
 3 files changed, 419 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
new file mode 100644
index 0000000..61d63e0
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
@@ -0,0 +1,71 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" Changelog
+
+## Release 3.0.0-alpha3 - 2017-05-25
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-6336](https://issues.apache.org/jira/browse/YARN-6336) | Jenkins report YARN new UI build failure |  Blocker | . | Junping Du | Sunil G |
+| [YARN-6278](https://issues.apache.org/jira/browse/YARN-6278) | Enforce to use correct node and npm version in new YARN-UI build |  Critical | . | Sunil G | Sunil G |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
new file mode 100644
index 0000000..bda1807
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
@@ -0,0 +1,22 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop"  3.0.0-alpha3 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml
new file mode 100644
index 0000000..cadf733
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml
@@ -0,0 +1,326 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu May 25 05:19:31 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 3.0.0-alpha3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.0.0-alpha3.jar:/usr/lib/jvm/java-8-oracle/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.0.0-alpha3.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.0-RC2/kerb-simplekdc-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-config/1.0.0-RC2/kerby-config-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-core/1.0.0-RC2/kerb-core-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-asn1/1.0.0-RC2/kerby-asn1-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-pkix/1.0.0-RC2/kerby-pkix-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-util/1.0.0-RC2/kerby-util-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-client/1.0.0-RC2/kerb-client-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-common/1.0.0-RC2/kerb-common-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-util/1.0.0-RC2/kerb-util-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-crypto/1.0.0-RC2/kerb-crypto-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-server/1.0.0-RC2/kerb-server-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-identity/1.0.0-RC2/kerb-identity-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-admin/1.0.0-RC2/kerb-admin-1.0.0-RC2.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.0.0-alpha3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/eclipse/jetty/jetty-servlet/9.3.11.v20160721/jetty-servlet-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-security/9.3.11.v20160721/jetty-security-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-webapp/9.3.11.v20160721/jetty-webapp-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-xml/9.3.11.v20160721/jetty-xml-9.3.11.v20160721.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-beanutils/commons-beanutils/1.9.3/commons-beanutils-1.9.3.jar:/maven/org/apache/commons/commons-configuration2/2.1/commons-configuration2-2.1.jar:/maven/org/apache/commons/commons-lang3/3.3.2/commons-lang3-3.3.2.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/re2j/re2j/1.0/re2j-1.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.51/jsch-0.1.51.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.0.0-alpha3.jar:/maven/com/squareup/okhttp/okhttp/2.4.0/okhttp-2.4.0.jar:/maven/com/squareup/okio/okio/1.4.0/okio-1.4.0.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/eclipse/jetty/jetty-server/9.3.11.v20160721/jetty-server-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-http/9.3.11.v20160721/jetty-http-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-io/9.3.11.v20160721/jetty-io-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-util/9.3.11.v20160721/jetty-util-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.3.11.v20160721/jetty-util-ajax-9.3.11.v20160721.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.10.5.Final/netty-3.10.5.Final.jar:/maven/io/netty/netty-all/4.1.0.Beta5/netty-all-4.1.0.Beta5.jar:/maven/com/twitter/hpack/0.11.0/hpack-0.11.0.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 3.0.0-alpha3 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.dtp">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.command">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.connectors">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.datamodel">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.planner">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.erasurecode">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
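Beyond the package inventory, the jdiff report above pins down the two methods a pluggable audit logger must provide: initialize(Configuration) and the seven-argument logAuditEvent. A minimal sketch of an implementation against that interface follows; the class name and output format are our own, and wiring it in through the NameNode's dfs.namenode.audit.loggers setting is assumed rather than shown.

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

public class StdoutAuditLogger implements AuditLogger {
  @Override
  public void initialize(Configuration conf) {
    // No configuration needed for this sketch.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst, FileStatus stat) {
    // Keep this fast: per the javadoc above, it runs in a critical section
    // of the NameNode's operation.
    System.out.println("allowed=" + succeeded + " ugi=" + userName
        + " ip=" + addr + " cmd=" + cmd + " src=" + src + " dst=" + dst);
  }
}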




[43/50] [abbrv] hadoop git commit: YARN-6683. Invalid event: COLLECTOR_UPDATE at KILLED. Contributed by Rohith Sharma K S

Posted by xg...@apache.org.
YARN-6683. Invalid event: COLLECTOR_UPDATE at KILLED. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7311015a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7311015a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7311015a

Branch: refs/heads/YARN-5734
Commit: 7311015ace498e03aad88bcbe1581916803ceaba
Parents: 6aeda55
Author: Jian He <ji...@apache.org>
Authored: Mon Jun 5 13:15:08 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Jun 5 13:16:57 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/ResourceTrackerService.java |  6 +--
 .../rmapp/RMAppCollectorUpdateEvent.java        | 40 --------------------
 .../server/resourcemanager/rmapp/RMAppImpl.java | 36 +-----------------
 3 files changed, 2 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7311015a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 40bd610..aa7f524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -69,7 +69,6 @@ import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeLabelsUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppCollectorUpdateEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -651,10 +650,7 @@ public class ResourceTrackerService extends AbstractService implements
             String previousCollectorAddr = rmApp.getCollectorAddr();
             if (previousCollectorAddr == null
                 || !previousCollectorAddr.equals(collectorAddr)) {
-              // sending collector update event.
-              RMAppCollectorUpdateEvent event =
-                  new RMAppCollectorUpdateEvent(appId, collectorAddr);
-              rmContext.getDispatcher().getEventHandler().handle(event);
+              rmApp.setCollectorAddr(collectorAddr);
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7311015a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
deleted file mode 100644
index 9642911..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
-
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-/**
- * Event used for updating collector address in RMApp on node heartbeat.
- */
-public class RMAppCollectorUpdateEvent extends RMAppEvent {
-
-  private final String appCollectorAddr;
-
-  public RMAppCollectorUpdateEvent(ApplicationId appId,
-      String appCollectorAddr) {
-    super(appId, RMAppEventType.COLLECTOR_UPDATE);
-    this.appCollectorAddr = appCollectorAddr;
-  }
-
-  public String getAppCollectorAddr(){
-    return this.appCollectorAddr;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7311015a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 78df913..dda9474 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -165,7 +165,7 @@ public class RMAppImpl implements RMApp, Recoverable {
   private long storedFinishTime = 0;
   private int firstAttemptIdInStateStore = 1;
   private int nextAttemptId = 1;
-  private String collectorAddr;
+  private volatile String collectorAddr;
   // This field isn't protected by readlock now.
   private volatile RMAppAttempt currentAttempt;
   private String queue;
@@ -217,8 +217,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from NEW state
     .addTransition(RMAppState.NEW, RMAppState.NEW,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.NEW, RMAppState.NEW,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.NEW, RMAppState.NEW_SAVING,
         RMAppEventType.START, new RMAppNewlySavingTransition())
     .addTransition(RMAppState.NEW, EnumSet.of(RMAppState.SUBMITTED,
@@ -235,8 +233,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     // Transitions from NEW_SAVING state
     .addTransition(RMAppState.NEW_SAVING, RMAppState.NEW_SAVING,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.NEW_SAVING, RMAppState.NEW_SAVING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.NEW_SAVING, RMAppState.SUBMITTED,
         RMAppEventType.APP_NEW_SAVED, new AddApplicationToSchedulerTransition())
     .addTransition(RMAppState.NEW_SAVING, RMAppState.FINAL_SAVING,
@@ -251,8 +247,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from SUBMITTED state
     .addTransition(RMAppState.SUBMITTED, RMAppState.SUBMITTED,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.SUBMITTED, RMAppState.SUBMITTED,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.SUBMITTED, RMAppState.FINAL_SAVING,
         RMAppEventType.APP_REJECTED,
         new FinalSavingTransition(
@@ -267,8 +261,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from ACCEPTED state
     .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.ACCEPTED, RMAppState.RUNNING,
         RMAppEventType.ATTEMPT_REGISTERED, new RMAppStateUpdateTransition(
             YarnApplicationState.RUNNING))
@@ -294,8 +286,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from RUNNING state
     .addTransition(RMAppState.RUNNING, RMAppState.RUNNING,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.RUNNING, RMAppState.RUNNING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.RUNNING, RMAppState.FINAL_SAVING,
         RMAppEventType.ATTEMPT_UNREGISTERED,
         new FinalSavingTransition(
@@ -325,8 +315,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING, 
         RMAppEventType.APP_RUNNING_ON_NODE,
         new AppRunningOnNodeTransition())
-    .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     // ignorable transitions
     .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING,
         EnumSet.of(RMAppEventType.NODE_UPDATE, RMAppEventType.KILL,
@@ -338,8 +326,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     .addTransition(RMAppState.FINISHING, RMAppState.FINISHING, 
         RMAppEventType.APP_RUNNING_ON_NODE,
         new AppRunningOnNodeTransition())
-    .addTransition(RMAppState.FINISHING, RMAppState.FINISHING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     // ignorable transitions
     .addTransition(RMAppState.FINISHING, RMAppState.FINISHING,
       EnumSet.of(RMAppEventType.NODE_UPDATE,
@@ -351,8 +337,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     .addTransition(RMAppState.KILLING, RMAppState.KILLING, 
         RMAppEventType.APP_RUNNING_ON_NODE,
         new AppRunningOnNodeTransition())
-    .addTransition(RMAppState.KILLING, RMAppState.KILLING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.KILLING, RMAppState.FINAL_SAVING,
         RMAppEventType.ATTEMPT_KILLED,
         new FinalSavingTransition(
@@ -1020,24 +1004,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     };
   }
 
-  private static final class RMAppCollectorUpdateTransition
-      extends RMAppTransition {
-
-    public void transition(RMAppImpl app, RMAppEvent event) {
-      if (YarnConfiguration.timelineServiceV2Enabled(app.conf)) {
-        LOG.info("Updating collector info for app: " + app.getApplicationId());
-
-        RMAppCollectorUpdateEvent appCollectorUpdateEvent =
-            (RMAppCollectorUpdateEvent) event;
-        // Update collector address
-        app.setCollectorAddr(appCollectorUpdateEvent.getAppCollectorAddr());
-
-        // TODO persistent to RMStateStore for recover
-        // Save to RMStateStore
-      }
-    };
-  }
-
   private static final class RMAppNodeUpdateTransition extends RMAppTransition {
     public void transition(RMAppImpl app, RMAppEvent event) {
       RMAppNodeUpdateEvent nodeUpdateEvent = (RMAppNodeUpdateEvent) event;
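The shape of the fix is worth noting: COLLECTOR_UPDATE was delivered through the RMApp state machine, so any state without a registered transition (KILLED among them) rejected it as an invalid event, while a plain volatile setter is legal in every state and needs no transition table entries at all. A toy illustration of the difference (these are not the YARN classes):

public class DirectUpdateSketch {
  enum State { RUNNING, KILLED }

  static class App {
    private volatile State state = State.KILLED;
    // volatile: heartbeat threads can publish the address at any time.
    private volatile String collectorAddr;

    // Old shape: delivery via the state machine; a state with no matching
    // transition rejects the event.
    void handle(String eventType) {
      if (state == State.KILLED && eventType.equals("COLLECTOR_UPDATE")) {
        throw new IllegalStateException(
            "Invalid event: " + eventType + " at " + state);
      }
      // ... registered transitions would run here ...
    }

    // New shape: a direct setter, valid regardless of state.
    void setCollectorAddr(String addr) {
      collectorAddr = addr;
    }
  }

  public static void main(String[] args) {
    App app = new App();
    app.setCollectorAddr("node-1:1234");  // always fine
    try {
      app.handle("COLLECTOR_UPDATE");     // fails once the app is KILLED
    } catch (IllegalStateException expected) {
      System.out.println(expected.getMessage());
    }
  }
}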




[37/50] [abbrv] hadoop git commit: YARN-6208. Improve the log when a FinishAppEvent is sent to a NodeManager that didn't run the application (Contributed by Akira Ajisaka via Daniel Templeton)

Posted by xg...@apache.org.
YARN-6208. Improve the log when a FinishAppEvent is sent to a NodeManager that didn't run the application
(Contributed by Akira Ajisaka via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73ecb193
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73ecb193
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73ecb193

Branch: refs/heads/YARN-5734
Commit: 73ecb19312879d54e1cbe80199fe950d81c81104
Parents: 056cc72
Author: Daniel Templeton <te...@apache.org>
Authored: Fri Jun 2 08:50:19 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Fri Jun 2 08:50:19 2017 -0700

----------------------------------------------------------------------
 .../nodemanager/containermanager/ContainerManagerImpl.java     | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73ecb193/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 50268b9..cbf617b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1475,8 +1475,10 @@ public class ContainerManagerImpl extends CompositeService implements
       for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
         Application app = this.context.getApplications().get(appID);
         if (app == null) {
-          LOG.warn("couldn't find application " + appID + " while processing"
-              + " FINISH_APPS event");
+          LOG.info("couldn't find application " + appID + " while processing"
+              + " FINISH_APPS event. The ResourceManager allocated resources"
+              + " for this application to the NodeManager but no active"
+              + " containers were found to process.");
           continue;
         }
 




[39/50] [abbrv] hadoop git commit: YARN-6458. Use yarn package manager to lock down dependency versions for new web UI. Contributed by Sreenath Somarajapuram.

Posted by xg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46f7e919/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
new file mode 100644
index 0000000..948feb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
@@ -0,0 +1,4983 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amd-name-resolver@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.2.tgz#7bee4e112aabeecc2e14429c4ca750c55d8e5ecd"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.0.1, ansi-styles@^2.1.0, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.0.0:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.0.6.tgz#a2d28c93102aa6cc96245a26cb954de06ec53f0c"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.0 || ^1.1.13"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.2:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-index@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-index/-/array-index-1.0.0.tgz#ec56a749ee103e4e08c790b9c353df16055b97f9"
+  dependencies:
+    debug "^2.2.0"
+    es6-symbol "^3.0.2"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0, asap@~2.0.3:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-disk-cache@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.0.tgz#ac3613b1da9bed1b47510bb4651b8931e47146c7"
+
+async@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.2.6, async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-htmlbars-inline-precompile@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-0.0.5.tgz#60fc2a3a453664cb524b21866892c212ee63ff70"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.2.tgz#474df4a9f2da24e05df3158c3b1db3c3cd46a154"
+
+base64id@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-0.1.0.tgz#02ce0fdeee0cef4f40080e1e73e834f0b1bfce3f"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+benchmark@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/benchmark/-/benchmark-1.0.0.tgz#2f1e2fa4c359f11122aa183082218e957e390c73"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+bl@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.1.2.tgz#fdca871a99713aa00d19e3bbba41c44787a65398"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.8.tgz#0688f46da2bbf9cff0c4f68225a0cb95cbe8a46b"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.26, bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+body-parser@^1.2.0:
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.17.1.tgz#75b3bc98ddd6e7e0d8ffe750dfaca5c66993fa47"
+  dependencies:
+    bytes "2.4.0"
+    content-type "~1.0.2"
+    debug "2.6.1"
+    depd "~1.1.0"
+    http-errors "~1.6.1"
+    iconv-lite "0.4.15"
+    on-finished "~2.3.0"
+    qs "6.4.0"
+    raw-body "~2.2.0"
+    type-is "~1.6.14"
+
+body-parser@~1.14.0:
+  version "1.14.2"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.14.2.tgz#1015cb1fe2c443858259581db53332f8d0cf50f9"
+  dependencies:
+    bytes "2.2.0"
+    content-type "~1.0.1"
+    debug "~2.2.0"
+    depd "~1.1.0"
+    http-errors "~1.3.1"
+    iconv-lite "0.4.13"
+    on-finished "~2.3.0"
+    qs "5.2.0"
+    raw-body "~2.1.5"
+    type-is "~1.6.10"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.6.1.tgz#7093155688bef44079bf4cb32d189312c87ded60"
+  dependencies:
+    graceful-fs "~2.0.0"
+    mout "~0.9.0"
+    optimist "~0.6.0"
+    osenv "0.0.3"
+
+bower-endpoint-parser@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower-shrinkwrap-resolver-ext@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/bower-shrinkwrap-resolver-ext/-/bower-shrinkwrap-resolver-ext-0.1.0.tgz#963c1a87107501b0cb7823d8cbc84d5167c7fa23"
+  dependencies:
+    debuglog "^1.0.1"
+    json-stable-stringify "^1.0.1"
+    object-assign "^4.0.1"
+    semver "^5.3.0"
+    string.prototype.endswith "^0.2.0"
+
+bower@1.7.7, bower@^1.3.12:
+  version "1.7.7"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.7.7.tgz#2fd7ff3ebdcba5a8ffcd84c397c8fdfe9f825f92"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@2.4.2:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.4.2.tgz#b84953affbda78d17dffb41349398f50fc26125c"
+  dependencies:
+    broccoli-asset-rewrite "^1.0.9"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "~3.0.6"
+
+broccoli-asset-rewrite@^1.0.9:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-babel-transpiler@^5.4.5, broccoli-babel-transpiler@^5.5.0:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@^2.0.0, broccoli-caching-writer@^2.0.4, broccoli-caching-writer@^2.2.0, broccoli-caching-writer@^2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-clean-css@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-0.2.0.tgz#15f1c265a6986585a972bfb070bf52e9c054c861"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    clean-css "^2.2.1"
+
+broccoli-config-loader@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.0.tgz#c3cf5ecfaffc04338c6f1d5d38dc36baeaa131ba"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+
+broccoli-config-replace@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-config-replace/-/broccoli-config-replace-1.1.2.tgz#6ea879d92a5bad634d11329b51fc5f4aafda9c00"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.0"
+    debug "^2.2.0"
+    fs-extra "^0.24.0"
+
+broccoli-filter@^0.1.6:
+  version "0.1.14"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-0.1.14.tgz#23cae3891ff9ebb7b4d7db00c6dcf03535daf7ad"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.6"
+    broccoli-writer "^0.1.1"
+    mkdirp "^0.3.5"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rsvp "^3.0.16"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.1.3"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel@1.0.1, broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.0.1.tgz#12cb76e342343592a3b18ae7840c0db3bd16d8af"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.0.0"
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.3.0"
+    minimatch "^2.0.1"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.2.6"
+
+broccoli-jshint@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-1.2.0.tgz#8cd565d11a04bfd32cb8f85a0f7ede1e5be7a6a2"
+  dependencies:
+    broccoli-persistent-filter "^1.2.0"
+    chalk "~0.4.0"
+    findup-sync "^0.3.0"
+    jshint "^2.7.0"
+    json-stable-stringify "^1.0.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@^0.2.6, broccoli-kitchen-sink-helpers@^0.2.7:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-less-single@^0.6.4:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/broccoli-less-single/-/broccoli-less-single-0.6.4.tgz#200316f4146b8cf7e6ab97fc661b8085cc89bdb9"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    include-path-searcher "^0.1.0"
+    less "^2.5.0"
+    lodash.merge "^3.3.2"
+    mkdirp "^0.5.0"
+
+broccoli-merge-trees@1.1.1, broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.0, broccoli-merge-trees@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.1.1.tgz#1e283d18c686da922bb91a80d7aac0d161388e21"
+  dependencies:
+    broccoli-plugin "^1.0.0"
+    can-symlink "^1.0.0"
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.4.3"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6, broccoli-persistent-filter@^1.2.0:
+  version "1.2.13"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.2.13.tgz#61368669e2b8f35238fdd38a2a896597e4a1c821"
+  dependencies:
+    async-disk-cache "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.2.0, broccoli-plugin@^1.2.1, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.1.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-slow-trees@^1.0.0, broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-source@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-source/-/broccoli-source-1.1.0.tgz#54f0e82c8b73f46580cbbc4f578f0b32fca8f809"
+
+broccoli-sourcemap-concat@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/broccoli-sourcemap-concat/-/broccoli-sourcemap-concat-1.1.6.tgz#7caa0e28e2553c58897c369a673da05600541872"
+  dependencies:
+    broccoli-caching-writer "^2.0.0"
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    fast-sourcemap-concat " ^0.2.4"
+    lodash-node "^2.4.1"
+    lodash.uniq "^3.2.2"
+    mkdirp "^0.5.1"
+
+broccoli-sourcemap-concat@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sourcemap-concat/-/broccoli-sourcemap-concat-2.0.2.tgz#64dbea4f9da4737c3fc5502efa20bb6322cd06a2"
+  dependencies:
+    broccoli-caching-writer "^2.0.0"
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    fast-sourcemap-concat " ^0.2.4"
+    lodash-node "^2.4.1"
+    lodash.uniq "^3.2.2"
+    minimatch "^2.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-sri-hash@^1.2.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sri-hash/-/broccoli-sri-hash-1.2.2.tgz#64e54401ac02ea49ebf2701169ae214c07588493"
+  dependencies:
+    broccoli-caching-writer "^2.2.0"
+    mkdirp "^0.5.1"
+    rsvp "^3.1.0"
+    sri-toolbox "^0.2.0"
+    symlink-or-copy "^1.0.1"
+
+broccoli-stew@^1.0.0:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-1.5.2.tgz#04f84ab0db539031fa868ccfa563c9932d50cedb"
+  dependencies:
+    broccoli-plugin "^1.2.1"
+    debug "^2.2.0"
+    lodash.merge "^4.5.1"
+    matcher-collection "^1.0.0"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.7.0"
+    walk-sync "^0.1.3"
+
+broccoli-unwatched-tree@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-unwatched-tree/-/broccoli-unwatched-tree-0.1.1.tgz#4312fde04bdafe67a05a967d72cc50b184a9f514"
+
+broccoli-viz@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
+
+broccoli-writer@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+broccoli@0.16.8:
+  version "0.16.8"
+  resolved "https://registry.yarnpkg.com/broccoli/-/broccoli-0.16.8.tgz#2a00f6b82a8106ec9cfb380a8ada4ad490b836d5"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-slow-trees "^1.0.0"
+    commander "^2.5.0"
+    connect "^3.3.3"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.2.1"
+    handlebars "^3.0.1"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bytes@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.2.0.tgz#fd35464a403f6f9117c2de3609ecff9cae000588"
+
+bytes@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chalk@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.0.tgz#09b453cec497a75520e4a60ae48214a8700e0921"
+  dependencies:
+    ansi-styles "^2.1.0"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@^1.0.1, chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-base-url@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/clean-base-url/-/clean-base-url-1.0.0.tgz#c901cf0a20b972435b0eccd52d056824a4351b7b"
+
+clean-css@^2.2.1:
+  version "2.2.23"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-2.2.23.tgz#0590b5478b516c4903edc2d89bd3fdbdd286328c"
+  dependencies:
+    commander "2.2.x"
+
+cli-color@~0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.3.3.tgz#12d5bdd158ff8a0b0db401198913c03df069f6f5"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    memoizee "~0.3.8"
+    timers-ext "0.1"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+cmd-shim@~2.0.1:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.2:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+commander@2.2.x:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.2.0.tgz#175ad4b9317f3ff615f201c1e57224f55a3e91df"
+
+commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+compressible@~2.0.8:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd"
+  dependencies:
+    mime-db ">= 1.27.0 < 2"
+
+compression@^1.4.4:
+  version "1.6.2"
+  resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3"
+  dependencies:
+    accepts "~1.3.3"
+    bytes "2.3.0"
+    compressible "~2.0.8"
+    debug "~2.2.0"
+    on-headers "~1.0.1"
+    vary "~1.1.0"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@1.5.0, concat-stream@^1.4.6:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.0.tgz#53f7d43c51c5e43f81c8fdd03321c631be68d611"
+  dependencies:
+    inherits "~2.0.1"
+    readable-stream "~2.0.0"
+    typedarray "~0.0.5"
+
+config-chain@~1.1.9:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-1.2.1.tgz#00ad402c0dba027bd8b4b7228dc7d42cefe3c81a"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^3.0.0"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+connect@^3.3.3:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+consolidate@^0.13.1:
+  version "0.13.1"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.13.1.tgz#9e9503568eb4850889da6ed87a852c8dd2d13f64"
+  dependencies:
+    bluebird "^2.9.26"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.1, content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-object@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.2.tgz#c9a6fee8f712e281fa9f6fba10243409ea2debc3"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cpr@0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/cpr/-/cpr-0.4.2.tgz#cc5083e6d2fa31f52bbfeefae508a445fe6180f2"
+  dependencies:
+    graceful-fs "~4.1.2"
+    mkdirp "~0.5.0"
+    rimraf "~2.4.3"
+
+cross-spawn-async@^2.0.0:
+  version "2.2.5"
+  resolved "https://registry.yarnpkg.com/cross-spawn-async/-/cross-spawn-async-2.2.5.tgz#845ff0c0834a3ded9d160daca6d390906bb288cc"
+  dependencies:
+    lru-cache "^4.0.0"
+    which "^1.2.8"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+d@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309"
+  dependencies:
+    es5-ext "~0.10.2"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@0.7.4:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
+
+debug@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-1.0.2.tgz#3849591c10cce648476c3c7c2e2e3416db5963c4"
+  dependencies:
+    ms "0.6.2"
+
+debug@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-1.0.3.tgz#fc8c6b2d6002804b4081c0208e0f6460ba1fa3e4"
+  dependencies:
+    ms "0.6.2"
+
+debug@1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-1.0.4.tgz#5b9c256bd54b6ec02283176fa8a0ede6d154cbf8"
+  dependencies:
+    ms "0.6.2"
+
+debug@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.1.0.tgz#33ab915659d8c2cc8a41443d94d6ebd37697ed21"
+  dependencies:
+    ms "0.6.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+did_it_work@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/did_it_work/-/did_it_work-0.0.6.tgz#5180cb9e16ebf9a8753a0cc6b4af9ccdff71ec05"
+
+diff@^1.3.1:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+em-helpers@^0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/em-helpers/-/em-helpers-0.8.0.tgz#01678f3692a61d563cce68e49459e206d14db095"
+  dependencies:
+    ember-cli-htmlbars "^1.0.1"
+    ember-cli-less "^1.4.0"
+    source-map "^0.5.6"
+  optionalDependencies:
+    phantomjs-prebuilt "2.1.13"
+
+em-table@^0.7.0:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/em-table/-/em-table-0.7.2.tgz#867ff734701df9765f2505e02acd74768edb0f71"
+  dependencies:
+    ember-cli-htmlbars "^1.0.1"
+    ember-cli-less "^1.4.0"
+    source-map "^0.5.6"
+  optionalDependencies:
+    phantomjs-prebuilt "2.1.13"
+
+ember-array-contains-helper@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ember-array-contains-helper/-/ember-array-contains-helper-1.0.2.tgz#53427e6e9dfcfceb443bfeb6965928b5f624f6a0"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-bootstrap@0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/ember-bootstrap/-/ember-bootstrap-0.5.1.tgz#bbad60b2818c47b3fb31562967ae02ee7e92d38c"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+    ember-wormhole "^0.3.4"
+
+ember-cli-app-version@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-app-version/-/ember-cli-app-version-1.0.0.tgz#6963591abb3a176f68ab1507f41324e8154d0e66"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+    ember-cli-htmlbars "^1.0.0"
+    git-repo-version "0.3.0"
+
+ember-cli-babel@5.1.6, ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6:
+  version "5.1.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.1.6.tgz#d3e4fe59d96589adf7db1d99ff4f6b9dfa9dc132"
+  dependencies:
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-funnel "^1.0.0"
+    clone "^1.0.2"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-content-security-policy@0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-content-security-policy/-/ember-cli-content-security-policy-0.4.0.tgz#71e4f228e68bcefc313f0ffae26f3600a0093276"
+  dependencies:
+    body-parser "^1.2.0"
+
+ember-cli-copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-copy-dereference/-/ember-cli-copy-dereference-1.0.0.tgz#a1795bf6c70650317df4ab8674dd02e0bea5d4fd"
+
+ember-cli-dependency-checker@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-dependency-checker/-/ember-cli-dependency-checker-1.2.0.tgz#0d1d4fc93a48d9a105fbb120d262d05485dd7425"
+  dependencies:
+    chalk "^0.5.1"
+    is-git-url "0.2.0"
+    semver "^4.1.0"
+
+ember-cli-get-dependency-depth@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-get-dependency-depth/-/ember-cli-get-dependency-depth-1.0.0.tgz#e0afecf82a2d52f00f28ab468295281aec368d11"
+
+ember-cli-htmlbars-inline-precompile@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-0.3.1.tgz#5e37101d7017c61ae11b721ee709ae0c1802ce59"
+  dependencies:
+    babel-plugin-htmlbars-inline-precompile "0.0.5"
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+
+ember-cli-htmlbars@0.7.6:
+  version "0.7.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-0.7.6.tgz#07e068eea68133d7e2fa6b505d87673f4bce145f"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    ember-cli-version-checker "^1.0.2"
+
+ember-cli-htmlbars@1.0.2, ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.0.2.tgz#53b3e503ed3aaccb8c23592f292bc2e10ae467a1"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-ic-ajax@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-ic-ajax/-/ember-cli-ic-ajax-0.2.1.tgz#0dd9a2c9f9d16f4da98ade15fef427ee63cf8710"
+  dependencies:
+    ic-ajax "~2.0.1"
+
+ember-cli-inject-live-reload@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.4.0.tgz#1dac5b4a2fecc51cea3c17bce9089596115a7fbd"
+
+ember-cli-is-package-missing@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-is-package-missing/-/ember-cli-is-package-missing-1.0.0.tgz#6e6184cafb92635dd93ca6c946b104292d4e3390"
+
+ember-cli-jquery-ui@0.0.20:
+  version "0.0.20"
+  resolved "https://registry.yarnpkg.com/ember-cli-jquery-ui/-/ember-cli-jquery-ui-0.0.20.tgz#c9949a18c5dc3c650ad9ab6bd8ac107cddf15a40"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+    ember-cli-htmlbars "0.7.6"
+
+ember-cli-less@^1.4.0:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-less/-/ember-cli-less-1.5.4.tgz#4cfbc05c6f23712fe9665f93be9bc8f2cccb0f71"
+  dependencies:
+    broccoli-less-single "^0.6.4"
+    broccoli-merge-trees "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    lodash.merge "^3.3.2"
+
+ember-cli-moment-shim@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ember-cli-moment-shim/-/ember-cli-moment-shim-0.7.3.tgz#bb4f6a36ad726acb9e432b0c73270cf1cd193973"
+  dependencies:
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-stew "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^5.0.0"
+    exists-sync "0.0.3"
+    lodash.defaults "^3.1.2"
+
+ember-cli-node-assets@^0.1.4:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-node-assets/-/ember-cli-node-assets-0.1.6.tgz#6488a2949048c801ad6d9e33753c7bce32fc1146"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.1.1"
+    broccoli-unwatched-tree "^0.1.1"
+    debug "^2.2.0"
+    lodash "^4.5.1"
+    resolve "^1.1.7"
+
+ember-cli-normalize-entity-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-normalize-entity-name/-/ember-cli-normalize-entity-name-1.0.0.tgz#0b14f7bcbc599aa117b5fddc81e4fd03c4bad5b7"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-numeral@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-numeral/-/ember-cli-numeral-0.2.0.tgz#60c984aa9e8b97bf79ded777104848935796528a"
+  dependencies:
+    ember-cli-node-assets "^0.1.4"
+    numeral "^1.5.3"
+
+ember-cli-path-utils@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
+
+ember-cli-preprocess-registry@^1.0.3:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-1.1.0.tgz#1a8f848876de2851507842e4c0c9051f62b4aac6"
+  dependencies:
+    broccoli-clean-css "0.2.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    debug "^2.2.0"
+    exists-sync "0.0.3"
+    lodash "^3.10.0"
+    process-relative-require "^1.0.0"
+    silent-error "^1.0.0"
+
+ember-cli-qunit@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-1.2.1.tgz#b728bc72ced1b4991d1044eb906b88dbfb019abe"
+  dependencies:
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-jshint "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    broccoli-sourcemap-concat "^1.1.6"
+    ember-cli-version-checker "^1.1.4"
+    ember-qunit "^0.4.18"
+    qunitjs "^1.20.0"
+    resolve "^1.1.6"
+
+ember-cli-release@0.2.8:
+  version "0.2.8"
+  resolved "https://registry.yarnpkg.com/ember-cli-release/-/ember-cli-release-0.2.8.tgz#e9fddd06058c0f3bc2ea57ab2667e9611f8fb205"
+  dependencies:
+    chalk "^1.0.0"
+    git-tools "^0.1.4"
+    make-array "^0.1.2"
+    merge "^1.2.0"
+    moment-timezone "^0.3.0"
+    nopt "^3.0.3"
+    rsvp "^3.0.17"
+    semver "^4.3.1"
+    silent-error "^1.0.0"
+
+ember-cli-sri@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-sri/-/ember-cli-sri-1.2.1.tgz#105b1f8bfb88fff8817caa14d0776ecb06f857ee"
+  dependencies:
+    broccoli-sri-hash "^1.2.2"
+
+ember-cli-string-utils@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1"
+
+ember-cli-test-info@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-info/-/ember-cli-test-info-1.0.0.tgz#ed4e960f249e97523cf891e4aed2072ce84577b4"
+  dependencies:
+    ember-cli-string-utils "^1.0.0"
+
+ember-cli-uglify@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.2.0.tgz#3208c32b54bc2783056e8bb0d5cfe9bbaf17ffb2"
+  dependencies:
+    broccoli-uglify-sourcemap "^1.0.0"
+
+ember-cli-version-checker@^1.0.2, ember-cli-version-checker@^1.1.4:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@1.13.14:
+  version "1.13.14"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-1.13.14.tgz#1ff35577a0b4fbb8efad24710f52bb7b874a7765"
+  dependencies:
+    amd-name-resolver "0.0.2"
+    bower "^1.3.12"
+    bower-config "0.6.1"
+    bower-endpoint-parser "0.2.2"
+    broccoli "0.16.8"
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-config-loader "^1.0.0"
+    broccoli-config-replace "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-kitchen-sink-helpers "^0.2.7"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-plugin "^1.2.0"
+    broccoli-sane-watcher "^1.1.1"
+    broccoli-source "^1.1.0"
+    broccoli-sourcemap-concat "^2.0.2"
+    broccoli-viz "^2.0.1"
+    chalk "1.1.0"
+    clean-base-url "^1.0.0"
+    compression "^1.4.4"
+    configstore "1.2.1"
+    core-object "0.0.2"
+    cpr "0.4.2"
+    debug "^2.1.3"
+    diff "^1.3.1"
+    ember-cli-copy-dereference "^1.0.0"
+    ember-cli-get-dependency-depth "^1.0.0"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-preprocess-registry "^1.0.3"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-router-generator "^1.0.0"
+    escape-string-regexp "^1.0.3"
+    exists-sync "0.0.3"
+    exit "^0.1.2"
+    express "^4.12.3"
+    findup "0.1.5"
+    findup-sync "^0.2.1"
+    fs-extra "0.22.1"
+    fs-monitor-stack "^1.0.2"
+    git-repo-info "^1.0.4"
+    glob "5.0.13"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "0.5.1"
+    is-git-url "^0.2.0"
+    isbinaryfile "^2.0.3"
+    leek "0.0.18"
+    lodash "^3.6.0"
+    markdown-it "4.3.0"
+    markdown-it-terminal "0.0.2"
+    merge-defaults "^0.2.1"
+    minimatch "^2.0.4"
+    morgan "^1.5.2"
+    node-modules-path "^1.0.0"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "2.14.10"
+    pleasant-progress "^1.0.2"
+    portfinder "^0.4.0"
+    promise-map-series "^0.2.1"
+    quick-temp "0.1.3"
+    readline2 "0.1.1"
+    resolve "^1.1.6"
+    rsvp "^3.0.17"
+    sane "^1.1.1"
+    semver "^4.3.3"
+    silent-error "^1.0.0"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.1"
+    testem "0.9.11"
+    through "^2.3.6"
+    tiny-lr "0.2.0"
+    walk-sync "0.1.3"
+    yam "0.0.18"
+
+ember-d3@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/ember-d3/-/ember-d3-0.1.0.tgz#f670809632298b3f7124c398ea48f1d7503ef1e1"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-data@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-2.1.0.tgz#9aa0f95042010513250818119d63ace3888c7dd9"
+  dependencies:
+    rsvp "^3.0.18"
+
+ember-disable-proxy-controllers@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-disable-proxy-controllers/-/ember-disable-proxy-controllers-1.0.1.tgz#1254eeec0ba025c24eb9e8da611afa7b38754281"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-export-application-global@1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.0.5.tgz#73bd641b19e3474190f717c9b504617511506bea"
+
+ember-qunit@^0.4.18:
+  version "0.4.24"
+  resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-0.4.24.tgz#b54cf6688c442d07eacea47c3285879cdd7c2163"
+  dependencies:
+    ember-test-helpers "^0.5.32"
+
+ember-resolver@2.0.3:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-2.0.3.tgz#7ed5cc60049906def4edc80c25eedb301cace3b2"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+    ember-cli-version-checker "^1.1.4"
+
+ember-router-generator@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-1.2.3.tgz#8ed2ca86ff323363120fc14278191e9e8f1315ee"
+  dependencies:
+    recast "^0.11.3"
+
+ember-spin-spinner@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/ember-spin-spinner/-/ember-spin-spinner-0.2.3.tgz#932c7823686f33274fc4daf08557fcb18037e876"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-test-helpers@^0.5.32:
+  version "0.5.34"
+  resolved "https://registry.yarnpkg.com/ember-test-helpers/-/ember-test-helpers-0.5.34.tgz#c8439108d1cba1d7d838c212208a5c4061471b83"
+  dependencies:
+    klassy "^0.1.3"
+
+ember-truth-helpers@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-truth-helpers/-/ember-truth-helpers-1.3.0.tgz#6ed9f83ce9a49f52bb416d55e227426339a64c60"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+
+ember-wormhole@^0.3.4:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-wormhole/-/ember-wormhole-0.3.6.tgz#bbe21bb5478ad254efe4fff4019ac6710f4ad85c"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+engine.io-client-pure@1.5.9:
+  version "1.5.9"
+  resolved "https://registry.yarnpkg.com/engine.io-client-pure/-/engine.io-client-pure-1.5.9.tgz#fc3c4977b00ffc5b059dfa73c06216ad838496e2"
+  dependencies:
+    component-emitter "1.1.2"
+    component-inherit "0.0.3"
+    debug "1.0.4"
+    engine.io-parser "1.2.2"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.1"
+    parseqs "0.0.2"
+    parseuri "0.0.4"
+    ws-pure "0.8.0"
+    xmlhttprequest-ssl "1.5.1"
+
+engine.io-parser@1.2.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.2.2.tgz#cd081041feea39c64323ff79b82a90a72afcccdd"
+  dependencies:
+    after "0.8.1"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.2"
+    blob "0.0.4"
+    has-binary "0.1.6"
+    utf8 "2.1.0"
+
+engine.io-pure@1.5.9:
+  version "1.5.9"
+  resolved "https://registry.yarnpkg.com/engine.io-pure/-/engine.io-pure-1.5.9.tgz#d46f763e0945e5f818d6a59061bf93f1e05c89b6"
+  dependencies:
+    base64id "0.1.0"
+    debug "1.0.3"
+    engine.io-parser "1.2.2"
+    ws-pure "0.8.0"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+errno@^0.1.1:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.4.tgz#b896e23a9e5e8ba33871fc996abd3635fc9a1c7d"
+  dependencies:
+    prr "~0.0.0"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.14, es5-ext@~0.10.2, es5-ext@~0.10.5, es5-ext@~0.10.6:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-iterator@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-0.1.3.tgz#d6f58b8c4fc413c249b4baa19768f8e4d7c8944e"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+    es6-symbol "~2.0.1"
+
+es6-promise@~4.0.3:
+  version "4.0.5"
+  resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.0.5.tgz#7882f30adde5b240ccfa7f7d78c548330951ae42"
+
+es6-symbol@^3.0.2, es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+es6-symbol@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-2.0.1.tgz#761b5c67cfd4f1d18afb234f691d678682cb3bf3"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+
+es6-weak-map@~0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-0.1.4.tgz#706cef9e99aa236ba7766c239c8b9e286ea7d228"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    es6-iterator "~0.1.3"
+    es6-symbol "~2.0.1"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.3:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esutils@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+event-emitter@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+events-to-array@^1.0.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.3.tgz#b910000bedbb113b378b82f5f5a7638107622dcf"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@~3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.0.tgz#5a474353b9f3353ddd8176dfd37b91c83a46f1d4"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extract-zip@~1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-1.5.0.tgz#92ccf6d81ef70a9fa4c1747114ccef6d8688a6c4"
+  dependencies:
+    concat-stream "1.5.0"
+    debug "0.7.4"
+    mkdirp "0.5.0"
+    yauzl "2.4.1"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+"fast-sourcemap-concat@ ^0.2.4":
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-0.2.7.tgz#b5d68a6d33e52f9d326fec38b836fa44d9b0d8fc"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
+  dependencies:
+    websocket-driver ">=0.5.1"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+fd-slicer@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.0.1.tgz#8b5bcbd9ec327c5041bf9ab023fd6750f1177e65"
+  dependencies:
+    pend "~1.2.0"
+
+filename-regex@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.0.tgz#996e3e80479b98b9897f15a8a58b3d084e926775"
+
+fileset@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/fileset/-/fileset-0.2.1.tgz#588ef8973c6623b2a76df465105696b96aac8067"
+  dependencies:
+    glob "5.x"
+    minimatch "2.x"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+findup-sync@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c"
+  dependencies:
+    glob "~4.3.0"
+
+findup-sync@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16"
+  dependencies:
+    glob "~5.0.0"
+
+findup@0.1.5, findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.6.6:
+  version "0.6.6"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.6.6.tgz#6023218e215c8ae628ac5105a60e470a50983f6f"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash "~2.3.0"
+    minimatch "~0.2.9"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~1.0.0-rc3, form-data@~1.0.0-rc4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.22.1:
+  version "0.22.1"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.22.1.tgz#5fd6f8049dc976ca19eb2355d658173cabcce056"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.16.3:
+  version "0.16.5"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.16.5.tgz#1ad661fa6c86c9608cd1b49efc6fce834939a750"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-extra@~0.30.0:
+  version "0.30.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-monitor-stack@^1.0.2:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fs-monitor-stack/-/fs-monitor-stack-1.1.1.tgz#c4038d5977939b6b4e38396d7e7cd0895a7ac6b3"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.3.1.tgz#41a84ee34994bd564c63d9852f1109c5de7f9290"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.4.3:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.4.4.tgz#f6b75d70db22c1f3b05d592270f4ed6c9c2f82dd"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.5.2:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.7:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@~1.0.4:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    iferr "^0.1.5"
+    imurmurhash "^0.1.4"
+    readable-stream "1 || 2"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.0.7.tgz#7ed0d1ac13d7686dd9e1bf6ceb8be273bf6d2f86"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@~1.0.8:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.0, gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.6.tgz#283ffd9fc1256840875311c1b60e8c40187110e6"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+git-repo-version@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/git-repo-version/-/git-repo-version-0.3.0.tgz#c9b97d0d21c4357d669dc1269c2b6a75da6cc0e9"
+  dependencies:
+    git-repo-info "^1.0.4"
+
+git-tools@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/git-tools/-/git-tools-0.1.4.tgz#5e43e59443b8a5dedb39dba663da49e79f943978"
+  dependencies:
+    spawnback "~1.0.0"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+"glob@3 || 4", glob@~4.3.0:
+  version "4.3.5"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.3.5.tgz#80fbb08ca540f238acce5d11d1e9bc41e75173d3"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@5.0.13, glob@^5.0.10:
+  version "5.0.13"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.13.tgz#0b6ffc3ac64eb90669f723a00a0ebb7281b33f8f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@5.x, glob@^5.0.15, glob@~5.0.0, glob@~5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^6.0.1:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.5, glob@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+graceful-fs@^3.0.5:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@~4.1.2:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@^1.8.1:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.9.2.tgz#0ea7743715db8d8de2c5ede1775e1b45ac85c02f"
+
+handlebars@^3.0.1:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-3.0.3.tgz#0e09651a2f0fb3c949160583710d551f92e6d2ad"
+  dependencies:
+    optimist "^0.6.1"
+    source-map "^0.1.40"
+  optionalDependencies:
+    uglify-js "~2.3"
+
+har-validator@~2.0.2, har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary-data@0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/has-binary-data/-/has-binary-data-0.1.3.tgz#8ebb18388b57f19a5231275a16fc18d51f379aae"
+  dependencies:
+    isarray "0.0.1"
+
+has-binary@0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.6.tgz#25326f39cfa4f616ad8787894e3af2cfbc7b6e10"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hasha@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/hasha/-/hasha-2.2.0.tgz#78d7cbfc1e6d66303fe79837365984517b2f6ee1"
+  dependencies:
+    is-stream "^1.0.1"
+    pinkie-promise "^2.0.0"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.

<TRUNCATED>



[30/50] [abbrv] hadoop git commit: HADOOP-14460. Azure: update doc for live and contract tests. Contributed by Mingliang Liu

Posted by xg...@apache.org.
HADOOP-14460. Azure: update doc for live and contract tests. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ece33208
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ece33208
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ece33208

Branch: refs/heads/YARN-5734
Commit: ece33208b89a6a06c1b362d10fab31343fe5df13
Parents: ff31035
Author: Mingliang Liu <li...@apache.org>
Authored: Wed May 31 14:55:39 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Thu Jun 1 11:52:11 2017 -0700

----------------------------------------------------------------------
 .../hadoop-azure/src/site/markdown/index.md     | 90 ++++++++++++--------
 1 file changed, 56 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ece33208/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 1d1274b..1dca3b9 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -396,42 +396,64 @@ To resolve this, restart the Azure Emulator.  Ensure it is v3.2 or later.
 
 It's also possible to run tests against a live Azure Storage account by saving a
 file to `src/test/resources/azure-auth-keys.xml` and setting
-`fs.azure.test.account.name` to the name of the storage account.
+the name of the storage account and its access key.
 
 For example:
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>fs.azure.test.account.name</name>
+    <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+  </property>
+  <property>
+    <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+    <value>{ACCOUNT ACCESS KEY}</value>
+  </property>
+</configuration>
+```
 
-    <?xml version="1.0"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-      <property>
-        <name>fs.azure.account.key.youraccount.blob.core.windows.net</name>
-        <value>YOUR ACCESS KEY</value>
-      </property>
-
-      <property>
-        <name>fs.azure.test.account.name</name>
-        <value>youraccount</value>
-      </property>
-    </configuration>
-
-To run contract tests add live Azure Storage account by saving a
-file to `src/test/resources/azure-auth-keys.xml`.
-For example:
+To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
+and the account access key. For example:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>fs.contract.test.fs.wasb</name>
+    <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+    <description>The name of the azure file system for testing.</description>
+  </property>
+  <property>
+    <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+    <value>{ACCOUNT ACCESS KEY}</value>
+  </property>
+</configuration>
+```
 
-    <?xml version="1.0"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-      <property>
-        <name>fs.contract.test.fs.wasb</name>
-        <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
-        <description>The name of the azure file system for testing.</description>
-      </property>
-
-      <property>
-        <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
-        <value>{ACCOUNTKEY}</value>
-      </property>
-    </configuration>
-
-DO NOT ADD azure-auth-keys.xml TO REVISION CONTROL.  The keys to your Azure
+Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` looks like the following:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>fs.azure.test.account.name</name>
+    <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+  </property>
+  <property>
+    <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+    <value>{ACCOUNT ACCESS KEY}</value>
+  </property>
+  <property>
+    <name>fs.contract.test.fs.wasb</name>
+    <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+  </property>
+</configuration>
+```
+
+DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL.  The keys to your Azure
 Storage account are a secret and must not be shared.
+
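
As a quick illustration of how these properties are consumed, here is a
minimal sketch that resolves the contract-test file system from
`fs.contract.test.fs.wasb`. It assumes `azure-auth-keys.xml` is on the test
classpath; the class and variable names are illustrative, and only the
property names come from the examples above.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class WasbAuthKeysSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // azure-auth-keys.xml supplies the account key and test URIs shown above.
    conf.addResource("azure-auth-keys.xml");
    String testFsUri = conf.get("fs.contract.test.fs.wasb");
    // FileSystem.get(URI, Configuration) picks the implementation registered
    // for the wasb:// scheme and authenticates with the matching
    // fs.azure.account.key.* entry.
    FileSystem fs = FileSystem.get(new URI(testFsUri), conf);
    System.out.println("Connected to " + fs.getUri());
    fs.close();
  }
}
```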




[49/50] [abbrv] hadoop git commit: YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda)

Posted by xg...@apache.org.
YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7003188a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7003188a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7003188a

Branch: refs/heads/YARN-5734
Commit: 7003188aa18bfb4c2d46b1fe088459adcc7c0345
Parents: 630ee7e
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Apr 3 10:12:01 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jun 5 14:03:17 2017 -0700

----------------------------------------------------------------------
 .../scheduler/MutableConfScheduler.java         |  40 ++
 .../scheduler/MutableConfigurationProvider.java |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../conf/InMemoryConfigurationStore.java        |   6 +-
 .../conf/MutableCSConfigurationProvider.java    |  24 +-
 .../resourcemanager/webapp/RMWebServices.java   | 169 +++++++
 .../webapp/dao/QueueConfigInfo.java             |  57 +++
 .../webapp/dao/QueueConfigsUpdateInfo.java      |  60 +++
 .../TestMutableCSConfigurationProvider.java     |   6 +-
 .../TestRMWebServicesConfigurationMutation.java | 477 +++++++++++++++++++
 10 files changed, 849 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
new file mode 100644
index 0000000..35e36e1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Interface for a scheduler that supports changing configuration at runtime.
+ *
+ */
+public interface MutableConfScheduler extends ResourceScheduler {
+
+  /**
+   * Update the scheduler's configuration.
+   * @param user Caller of this update
+   * @param confUpdate key-value map of the configuration update
+   * @throws IOException if update is invalid
+   */
+  void updateConfiguration(UserGroupInformation user,
+      Map<String, String> confUpdate) throws IOException;
+
+}
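
Because only some schedulers implement this interface, callers are expected
to feature-test at runtime before mutating configuration. A minimal sketch of
such a caller (a hypothetical helper, not part of this patch; the scheduler
and user would come from the surrounding RM context):

```java
import java.io.IOException;
import java.util.Collections;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;

final class MutableConfCallerSketch {
  // Applies a single key-value update if the active scheduler supports
  // runtime mutation, mirroring the instanceof check this patch adds in
  // RMWebServices further below. The queue property is illustrative.
  static void setRootACapacity(ResourceScheduler scheduler,
      UserGroupInformation user) throws IOException {
    if (!(scheduler instanceof MutableConfScheduler)) {
      throw new UnsupportedOperationException(
          "Active scheduler does not support runtime configuration changes.");
    }
    ((MutableConfScheduler) scheduler).updateConfiguration(user,
        Collections.singletonMap(
            "yarn.scheduler.capacity.root.a.capacity", "50"));
  }
}
```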

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index da30a2b..889c3bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import java.io.IOException;
 import java.util.Map;
 
 /**
@@ -29,7 +30,9 @@ public interface MutableConfigurationProvider {
    * Update the scheduler configuration with the provided key value pairs.
    * @param user User issuing the request
    * @param confUpdate Key-value pairs for configurations to be updated.
+   * @throws IOException if scheduler could not be reinitialized
    */
-  void mutateConfiguration(String user, Map<String, String> confUpdate);
+  void mutateConfiguration(String user, Map<String, String> confUpdate)
+      throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 9353df2..85138d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -85,6 +85,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnSched
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
@@ -149,7 +151,7 @@ import com.google.common.util.concurrent.SettableFuture;
 public class CapacityScheduler extends
     AbstractYarnScheduler<FiCaSchedulerApp, FiCaSchedulerNode> implements
     PreemptableResourceScheduler, CapacitySchedulerContext, Configurable,
-    ResourceAllocationCommitter {
+    ResourceAllocationCommitter, MutableConfScheduler {
 
   private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
 
@@ -2486,4 +2488,16 @@ public class CapacityScheduler extends
       writeLock.unlock();
     }
   }
+
+  @Override
+  public void updateConfiguration(UserGroupInformation user,
+      Map<String, String> confUpdate) throws IOException {
+    if (csConfProvider instanceof MutableConfigurationProvider) {
+      ((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
+          user.getShortUserName(), confUpdate);
+    } else {
+      throw new UnsupportedOperationException("Configured CS configuration " +
+          "provider does not support updating configuration.");
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
index a208fb9..b97be1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -58,7 +58,11 @@ public class InMemoryConfigurationStore implements YarnConfigurationStore {
         if (isValid) {
           Map<String, String> mutations = mutation.getUpdates();
           for (Map.Entry<String, String> kv : mutations.entrySet()) {
-            schedConf.set(kv.getKey(), kv.getValue());
+            if (kv.getValue() == null) {
+              schedConf.unset(kv.getKey());
+            } else {
+              schedConf.set(kv.getKey(), kv.getValue());
+            }
           }
         }
         return true;
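
The null-means-unset convention above lets a single logged mutation both set
and delete keys. A small sketch of building such a batch (the property names
are illustrative):

```java
import java.util.HashMap;
import java.util.Map;

final class ConfUpdateBatchSketch {
  // One mutation batch: non-null values are set, null values are removed
  // by the store, matching the loop above.
  static Map<String, String> example() {
    Map<String, String> confUpdate = new HashMap<>();
    confUpdate.put("yarn.scheduler.capacity.root.a.capacity", "60"); // set
    confUpdate.put("yarn.scheduler.capacity.root.b.capacity", null); // unset
    return confUpdate;
  }
}
```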

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 267ab6a..ea1b3c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -60,34 +60,44 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
     }
     Configuration initialSchedConf = new Configuration(false);
     initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
-    this.schedConf = initialSchedConf;
-    confStore.initialize(config, initialSchedConf);
+    this.schedConf = new Configuration(false);
+    // We need to explicitly set the key-values in schedConf, otherwise
+    // these configuration keys cannot be deleted when
+    // configuration is reloaded.
+    for (Map.Entry<String, String> kv : initialSchedConf) {
+      schedConf.set(kv.getKey(), kv.getValue());
+    }
+    confStore.initialize(config, schedConf);
     this.conf = config;
   }
 
   @Override
   public CapacitySchedulerConfiguration loadConfiguration(Configuration
       configuration) throws IOException {
-    Configuration loadedConf = new Configuration(configuration);
-    loadedConf.addResource(schedConf);
+    Configuration loadedConf = new Configuration(schedConf);
+    loadedConf.addResource(configuration);
     return new CapacitySchedulerConfiguration(loadedConf, false);
   }
 
   @Override
   public void mutateConfiguration(String user,
-      Map<String, String> confUpdate) {
+      Map<String, String> confUpdate) throws IOException {
     Configuration oldConf = new Configuration(schedConf);
     LogMutation log = new LogMutation(confUpdate, user);
     long id = confStore.logMutation(log);
     for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
-      schedConf.set(kv.getKey(), kv.getValue());
+      if (kv.getValue() == null) {
+        schedConf.unset(kv.getKey());
+      } else {
+        schedConf.set(kv.getKey(), kv.getValue());
+      }
     }
     try {
       rmContext.getScheduler().reinitialize(conf, rmContext);
     } catch (IOException e) {
       schedConf = oldConf;
       confStore.confirmMutation(id, false);
-      return;
+      throw e;
     }
     confStore.confirmMutation(id, true);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index bd0602b..78eb4de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -57,6 +57,7 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 
+import com.google.common.base.Joiner;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -136,11 +137,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
@@ -2614,4 +2617,170 @@ public class RMWebServices extends WebServices {
         app.getApplicationTimeouts().get(appTimeout.getTimeoutType()));
     return Response.status(Status.OK).entity(timeout).build();
   }
+
+  @PUT
+  @Path("/queues")
+  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+      MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public Response updateSchedulerConfiguration(QueueConfigsUpdateInfo
+      mutationInfo, @Context HttpServletRequest hsr)
+      throws AuthorizationException, InterruptedException {
+    init();
+
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
+    if (aclsManager.areACLsEnabled()) {
+      if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
+        String msg = "Only admins can carry out this operation.";
+        throw new ForbiddenException(msg);
+      }
+    }
+
+    ResourceScheduler scheduler = rm.getResourceScheduler();
+    if (scheduler instanceof MutableConfScheduler) {
+      try {
+        callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws IOException, YarnException {
+            Map<String, String> confUpdate =
+                constructKeyValueConfUpdate(mutationInfo);
+            ((CapacityScheduler) scheduler).updateConfiguration(callerUGI,
+                confUpdate);
+            return null;
+          }
+        });
+      } catch (IOException e) {
+        return Response.status(Status.BAD_REQUEST).entity(e.getMessage())
+            .build();
+      }
+      return Response.status(Status.OK).entity("Configuration change " +
+          "successfully applied.").build();
+    } else {
+      return Response.status(Status.BAD_REQUEST)
+          .entity("Configuration change only supported by CapacityScheduler.")
+          .build();
+    }
+  }
+
+  private Map<String, String> constructKeyValueConfUpdate(
+      QueueConfigsUpdateInfo mutationInfo) throws IOException {
+    CapacitySchedulerConfiguration currentConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    CapacitySchedulerConfiguration proposedConf =
+        new CapacitySchedulerConfiguration(currentConf, false);
+    Map<String, String> confUpdate = new HashMap<>();
+    for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
+      removeQueue(queueToRemove, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
+      addQueue(addQueueInfo, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
+      updateQueue(updateQueueInfo, proposedConf, confUpdate);
+    }
+    return confUpdate;
+  }
+
+  private void removeQueue(
+      String queueToRemove, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (queueToRemove == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+      String queueName = queueToRemove.substring(
+          queueToRemove.lastIndexOf('.') + 1);
+      CSQueue queue = cs.getQueue(queueName);
+      if (queue == null ||
+          !queue.getQueuePath().equals(queueToRemove)) {
+        throw new IOException("Queue " + queueToRemove + " not found");
+      } else if (queueToRemove.lastIndexOf('.') == -1) {
+        throw new IOException("Can't remove queue " + queueToRemove);
+      }
+      String parentQueuePath = queueToRemove.substring(0, queueToRemove
+          .lastIndexOf('.'));
+      String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
+      List<String> newSiblingQueues = new ArrayList<>();
+      for (String siblingQueue : siblingQueues) {
+        if (!siblingQueue.equals(queueName)) {
+          newSiblingQueues.add(siblingQueue);
+        }
+      }
+      proposedConf.setQueues(parentQueuePath, newSiblingQueues
+          .toArray(new String[0]));
+      String queuesConfig = CapacitySchedulerConfiguration.PREFIX +
+          parentQueuePath + CapacitySchedulerConfiguration.DOT +
+          CapacitySchedulerConfiguration.QUEUES;
+      if (newSiblingQueues.size() == 0) {
+        confUpdate.put(queuesConfig, null);
+      } else {
+        confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
+      }
+      // Escape literal dots in the queue path so the regex matches only
+      // configuration keys under this queue.
+      for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
+          ".*" + queueToRemove.replaceAll("\\.", "\\\\.") + "\\..*")
+          .entrySet()) {
+        proposedConf.unset(confRemove.getKey());
+        confUpdate.put(confRemove.getKey(), null);
+      }
+    }
+  }
+
+  private void addQueue(
+      QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (addInfo == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+      String queuePath = addInfo.getQueue();
+      String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
+      if (cs.getQueue(queueName) != null) {
+        throw new IOException("Can't add existing queue " + queuePath);
+      } else if (queuePath.lastIndexOf('.') == -1) {
+        throw new IOException("Can't add invalid queue " + queuePath);
+      }
+      String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
+      String[] siblings = proposedConf.getQueues(parentQueue);
+      List<String> siblingQueues = siblings == null ? new ArrayList<>() :
+          new ArrayList<>(Arrays.<String>asList(siblings));
+      siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
+      proposedConf.setQueues(parentQueue,
+          siblingQueues.toArray(new String[0]));
+      confUpdate.put(CapacitySchedulerConfiguration.PREFIX +
+          parentQueue + CapacitySchedulerConfiguration.DOT +
+          CapacitySchedulerConfiguration.QUEUES,
+          Joiner.on(',').join(siblingQueues));
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
+          queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
+
+  private void updateQueue(QueueConfigInfo updateInfo,
+      CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) {
+    if (updateInfo == null) {
+      return;
+    } else {
+      String queuePath = updateInfo.getQueue();
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
+          queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
 }
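To make the path arithmetic above easier to follow, here is a minimal, self-contained Java sketch of how a queue path splits into leaf name, parent path, and the Capacity Scheduler keys these methods rewrite. The prefix value is assumed from CapacitySchedulerConfiguration.PREFIX ("yarn.scheduler.capacity."); the class and variable names are illustrative only, not part of the patch.

    // Illustrative sketch; mirrors the path handling in addQueue/removeQueue.
    public final class QueuePathSketch {
      // Assumed value of CapacitySchedulerConfiguration.PREFIX.
      private static final String PREFIX = "yarn.scheduler.capacity.";

      public static void main(String[] args) {
        String queuePath = "root.a.a1";
        String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1); // "a1"
        String parentPath = queuePath.substring(0, queuePath.lastIndexOf('.')); // "root.a"
        // The key listing the parent's children; add/remove rewrites this value.
        String queuesKey = PREFIX + parentPath + ".queues";
        // Regex sweeping every key under the queue, with dots escaped so they
        // match literally instead of "any character".
        String sweep = ".*" + queuePath.replaceAll("\\.", "\\\\.") + "\\..*";
        System.out.println(queueName + " | " + queuesKey + " | " + sweep);
      }
    }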

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
new file mode 100644
index 0000000..b20eda6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for adding or updating a queue in the scheduler
+ * configuration.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigInfo {
+
+  @XmlElement(name = "queueName")
+  private String queue;
+
+  private HashMap<String, String> params = new HashMap<>();
+
+  public QueueConfigInfo() { }
+
+  public QueueConfigInfo(String queue, Map<String, String> params) {
+    this.queue = queue;
+    this.params = new HashMap<>(params);
+  }
+
+  public String getQueue() {
+    return this.queue;
+  }
+
+  public HashMap<String, String> getParams() {
+    return this.params;
+  }
+
+}
\ No newline at end of file
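Note that the @XmlElement(name = "queueName") annotation above renames the JSON field: clients send "queueName", not "queue". Below is a hedged sketch of marshalling one instance with the same Jersey JSON helpers the tests later in this patch use; the exact output shape depends on the JSON notation configured, so treat the printed form as approximate.

    // Sketch only; assumes Jersey's JSON support is on the classpath
    // (it is, for the tests in this patch).
    import java.io.StringWriter;
    import java.util.Collections;
    import com.sun.jersey.api.json.JSONJAXBContext;
    import com.sun.jersey.api.json.JSONMarshaller;

    public class QueueConfigInfoJsonSketch {
      public static void main(String[] args) throws Exception {
        JSONJAXBContext ctx = new JSONJAXBContext(QueueConfigInfo.class);
        JSONMarshaller jm = ctx.createJSONMarshaller();
        StringWriter sw = new StringWriter();
        jm.marshallToJSON(new QueueConfigInfo(
            "root.a", Collections.singletonMap("capacity", "25")), sw);
        // Prints roughly: {"queueName":"root.a","params":{"capacity":"25"}}
        System.out.println(sw);
      }
    }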

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
new file mode 100644
index 0000000..644ec90
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for making scheduler configuration changes (supports adding,
+ * removing, or updating a queue).
+ */
+@XmlRootElement(name = "schedConf")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigsUpdateInfo {
+
+  @XmlElement(name = "add")
+  private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
+
+  @XmlElement(name = "remove")
+  private ArrayList<String> removeQueueInfo = new ArrayList<>();
+
+  @XmlElement(name = "update")
+  private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
+
+  public QueueConfigsUpdateInfo() {
+    // JAXB needs this
+  }
+
+  public ArrayList<QueueConfigInfo> getAddQueueInfo() {
+    return addQueueInfo;
+  }
+
+  public ArrayList<String> getRemoveQueueInfo() {
+    return removeQueueInfo;
+  }
+
+  public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
+    return updateQueueInfo;
+  }
+}
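A usage sketch for the container class above, assembling one request that exercises all three lists before it is marshalled and PUT to /ws/v1/cluster/queues (the path the tests below use). The "capacity" key is assumed to match CapacitySchedulerConfiguration.CAPACITY; the queue names and values are arbitrary.

    // Sketch: one mutation covering add, remove, and update.
    QueueConfigsUpdateInfo update = new QueueConfigsUpdateInfo();
    update.getAddQueueInfo().add(new QueueConfigInfo(
        "root.d", java.util.Collections.singletonMap("capacity", "25")));
    update.getRemoveQueueInfo().add("root.c");
    update.getUpdateQueueInfo().add(new QueueConfigInfo(
        "root.b", java.util.Collections.singletonMap("capacity", "50")));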

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 3f103b1..254da31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -77,7 +77,11 @@ public class TestMutableCSConfigurationProvider {
     assertNull(confProvider.loadConfiguration(conf).get("badKey"));
     doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
         any(RMContext.class));
-    confProvider.mutateConfiguration(TEST_USER, badUpdate);
+    try {
+      confProvider.mutateConfiguration(TEST_USER, badUpdate);
+    } catch (IOException e) {
+      // Expected exception.
+    }
     assertNull(confProvider.loadConfiguration(conf).get("badKey"));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7003188a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
new file mode 100644
index 0000000..d149055
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -0,0 +1,477 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response.Status;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Test scheduler configuration mutation via REST API.
+ */
+public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
+
+  private static final File CONF_FILE = new File(new File("target",
+      "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
+  private static final File OLD_CONF_FILE = new File(new File("target",
+      "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE + ".tmp");
+
+  private static MockRM rm;
+  private static String userName;
+  private static CapacitySchedulerConfiguration csConf;
+  private static YarnConfiguration conf;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      try {
+        userName = UserGroupInformation.getCurrentUser().getShortUserName();
+      } catch (IOException ioe) {
+        throw new RuntimeException("Unable to get current user name "
+            + ioe.getMessage(), ioe);
+      }
+      csConf = new CapacitySchedulerConfiguration(new Configuration(false),
+          false);
+      setupQueueConfiguration(csConf);
+      conf = new YarnConfiguration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+          ResourceScheduler.class);
+      conf.set(CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+          CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER);
+      conf.set(YarnConfiguration.YARN_ADMIN_ACL, userName);
+      try {
+        if (CONF_FILE.exists()) {
+          if (!CONF_FILE.renameTo(OLD_CONF_FILE)) {
+            throw new RuntimeException("Failed to rename conf file");
+          }
+        }
+        FileOutputStream out = new FileOutputStream(CONF_FILE);
+        csConf.writeXml(out);
+        out.close();
+      } catch (IOException e) {
+        throw new RuntimeException("Failed to write XML file", e);
+      }
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+      filter("/*").through(TestRMWebServicesAppsModification
+          .TestRMCustomAuthFilter.class);
+    }
+  }
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    GuiceServletConfig.setInjector(
+        Guice.createInjector(new WebServletModule()));
+  }
+
+  private static void setupQueueConfiguration(
+      CapacitySchedulerConfiguration config) {
+    config.setQueues(CapacitySchedulerConfiguration.ROOT,
+        new String[]{"a", "b", "c"});
+
+    final String a = CapacitySchedulerConfiguration.ROOT + ".a";
+    config.setCapacity(a, 25f);
+    config.setMaximumCapacity(a, 50f);
+
+    final String a1 = a + ".a1";
+    final String a2 = a + ".a2";
+    config.setQueues(a, new String[]{"a1", "a2"});
+    config.setCapacity(a1, 100f);
+    config.setCapacity(a2, 0f);
+
+    final String b = CapacitySchedulerConfiguration.ROOT + ".b";
+    config.setCapacity(b, 75f);
+
+    final String c = CapacitySchedulerConfiguration.ROOT + ".c";
+    config.setCapacity(c, 0f);
+
+    final String c1 = c + ".c1";
+    config.setQueues(c, new String[] {"c1"});
+    config.setCapacity(c1, 0f);
+  }
+
+  public TestRMWebServicesConfigurationMutation() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testAddNestedQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Add parent queue root.d with two children d1 and d2.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> d1Capacity = new HashMap<>();
+    d1Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
+    d1Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "25");
+    Map<String, String> nearEmptyCapacity = new HashMap<>();
+    nearEmptyCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "1E-4");
+    nearEmptyCapacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY,
+        "1E-4");
+    Map<String, String> d2Capacity = new HashMap<>();
+    d2Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "75");
+    d2Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "75");
+    QueueConfigInfo d1 = new QueueConfigInfo("root.d.d1", d1Capacity);
+    QueueConfigInfo d2 = new QueueConfigInfo("root.d.d2", d2Capacity);
+    QueueConfigInfo d = new QueueConfigInfo("root.d", nearEmptyCapacity);
+    updateInfo.getAddQueueInfo().add(d1);
+    updateInfo.getAddQueueInfo().add(d2);
+    updateInfo.getAddQueueInfo().add(d);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(4, newCSConf.getQueues("root").length);
+    assertEquals(2, newCSConf.getQueues("root.d").length);
+    assertEquals(25.0f, newCSConf.getNonLabeledQueueCapacity("root.d.d1"),
+        0.01f);
+    assertEquals(75.0f, newCSConf.getNonLabeledQueueCapacity("root.d.d2"),
+        0.01f);
+  }
+
+  @Test
+  public void testAddWithUpdate() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Add root.d with capacity 25, reducing root.b capacity from 75 to 50.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> dCapacity = new HashMap<>();
+    dCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
+    Map<String, String> bCapacity = new HashMap<>();
+    bCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "50");
+    QueueConfigInfo d = new QueueConfigInfo("root.d", dCapacity);
+    QueueConfigInfo b = new QueueConfigInfo("root.b", bCapacity);
+    updateInfo.getAddQueueInfo().add(d);
+    updateInfo.getUpdateQueueInfo().add(b);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(4, newCSConf.getQueues("root").length);
+    assertEquals(25.0f, newCSConf.getNonLabeledQueueCapacity("root.d"), 0.01f);
+    assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
+  }
+
+  @Test
+  public void testRemoveQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.a.a2");
+    // Remove root.a.a2
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.a.a2");
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(1, newCSConf.getQueues("root.a").length);
+    assertEquals("a1", newCSConf.getQueues("root.a")[0]);
+  }
+
+  @Test
+  public void testRemoveParentQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.c", "root.c.c1");
+    // Remove root.c (parent queue)
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.c");
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(2, newCSConf.getQueues("root").length);
+    assertNull(newCSConf.getQueues("root.c"));
+  }
+
+  @Test
+  public void testRemoveParentQueueWithCapacity() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.a", "root.a.a1", "root.a.a2");
+    // Remove root.a (parent queue) with capacity 25
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.a");
+
+    // Set root.b capacity to 100
+    Map<String, String> bCapacity = new HashMap<>();
+    bCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "100");
+    QueueConfigInfo b = new QueueConfigInfo("root.b", bCapacity);
+    updateInfo.getUpdateQueueInfo().add(b);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(2, newCSConf.getQueues("root").length);
+    assertEquals(100.0f, newCSConf.getNonLabeledQueueCapacity("root.b"),
+        0.01f);
+  }
+
+  @Test
+  public void testRemoveMultipleQueues() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.b", "root.c", "root.c.c1");
+    // Remove root.b and root.c
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.b");
+    updateInfo.getRemoveQueueInfo().add("root.c");
+    Map<String, String> aCapacity = new HashMap<>();
+    aCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "100");
+    aCapacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "100");
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.a", aCapacity);
+    updateInfo.getUpdateQueueInfo().add(configInfo);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(1, newCSConf.getQueues("root").length);
+  }
+
+  private void stopQueue(String... queuePaths) throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Set state of queues to STOPPED.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> stoppedParam = new HashMap<>();
+    stoppedParam.put(CapacitySchedulerConfiguration.STATE,
+        QueueState.STOPPED.toString());
+    for (String queue : queuePaths) {
+      QueueConfigInfo stoppedInfo = new QueueConfigInfo(queue, stoppedParam);
+      updateInfo.getUpdateQueueInfo().add(stoppedInfo);
+    }
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    for (String queue : queuePaths) {
+      assertEquals(QueueState.STOPPED, newCSConf.getState(queue));
+    }
+  }
+
+  @Test
+  public void testUpdateQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Update config value.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> updateParam = new HashMap<>();
+    updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
+        "0.2");
+    QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+    updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+
+    assertEquals(CapacitySchedulerConfiguration
+            .DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,
+        cs.getConfiguration()
+            .getMaximumApplicationMasterResourcePerQueuePercent("root.a"),
+        0.001f);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
+    assertEquals(0.2f, newCSConf
+        .getMaximumApplicationMasterResourcePerQueuePercent("root.a"), 0.001f);
+
+    // Remove config. Config value should be reverted to default.
+    updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
+        null);
+    aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+    updateInfo.getUpdateQueueInfo().clear();
+    updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    newCSConf = cs.getConfiguration();
+    assertEquals(CapacitySchedulerConfiguration
+        .DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT, newCSConf
+            .getMaximumApplicationMasterResourcePerQueuePercent("root.a"),
+        0.001f);
+  }
+
+  @Test
+  public void testUpdateQueueCapacity() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Update root.a and root.b capacity to 50.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> updateParam = new HashMap<>();
+    updateParam.put(CapacitySchedulerConfiguration.CAPACITY, "50");
+    QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+    QueueConfigInfo bUpdateInfo = new QueueConfigInfo("root.b", updateParam);
+    updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+    updateInfo.getUpdateQueueInfo().add(bUpdateInfo);
+
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.a"), 0.01f);
+    assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    if (rm != null) {
+      rm.stop();
+    }
+    CONF_FILE.delete();
+    if (!OLD_CONF_FILE.renameTo(CONF_FILE)) {
+      throw new RuntimeException("Failed to re-copy old configuration file");
+    }
+    super.tearDown();
+  }
+
+  @SuppressWarnings("rawtypes")
+  private String toJson(Object nsli, Class klass) throws Exception {
+    StringWriter sw = new StringWriter();
+    JSONJAXBContext ctx = new JSONJAXBContext(klass);
+    JSONMarshaller jm = ctx.createJSONMarshaller();
+    jm.marshallToJSON(nsli, sw);
+    return sw.toString();
+  }
+}




[25/50] [abbrv] hadoop git commit: HADOOP-9849. License information is missing for native CRC32 code (Contributed by Andrew Wang via Daniel Templeton)

Posted by xg...@apache.org.
HADOOP-9849. License information is missing for native CRC32 code
(Contributed by Andrew Wang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92243484
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92243484
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92243484

Branch: refs/heads/YARN-5734
Commit: 92243484f9b868aa18618759145e9dd554e245c5
Parents: d5b71e4
Author: Daniel Templeton <te...@apache.org>
Authored: Wed May 31 15:57:48 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Wed May 31 15:57:48 2017 -0700

----------------------------------------------------------------------
 LICENSE.txt | 47 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92243484/LICENSE.txt
----------------------------------------------------------------------
diff --git a/LICENSE.txt b/LICENSE.txt
index 969708f..5391fd5 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -246,11 +246,48 @@ For the org.apache.hadoop.util.bloom.* classes:
 For portions of the native implementation of slicing-by-8 CRC calculation
 in src/main/native/src/org/apache/hadoop/util:
 
-/**
- *   Copyright 2008,2009,2010 Massachusetts Institute of Technology.
- *   All rights reserved. Use of this source code is governed by a
- *   BSD-style license that can be found in the LICENSE file.
- */
+Copyright (c) 2008,2009,2010 Massachusetts Institute of Technology.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the Massachusetts Institute of Technology nor
+  the names of its contributors may be used to endorse or promote
+  products derived from this software without specific prior written
+  permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Other portions are under the same license from Intel:
+http://sourceforge.net/projects/slicing-by-8/
+/*++
+ *
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ * This software program is licensed subject to the BSD License, 
+ * available at http://www.opensource.org/licenses/bsd-license.html
+ *
+ * Abstract: The main routine
+ * 
+ --*/
 
 For src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,lz4hc.c},
 




[02/50] [abbrv] hadoop git commit: YARN-6641. Non-public resource localization on a bad disk causes subsequent container failures. Contributed by Kuhu Shukla

Posted by xg...@apache.org.
YARN-6641. Non-public resource localization on a bad disk causes subsequent container failures. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aea42930
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aea42930
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aea42930

Branch: refs/heads/YARN-5734
Commit: aea42930bbb9566ea6988f684dbd72a72a2bdadf
Parents: 47474ff
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Fri May 26 09:37:56 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Fri May 26 09:37:56 2017 -0500

----------------------------------------------------------------------
 .../localizer/LocalResourcesTrackerImpl.java    | 13 ++--
 .../localizer/ResourceLocalizationService.java  | 10 +--
 .../TestLocalResourcesTrackerImpl.java          |  8 +--
 .../TestResourceLocalizationService.java        | 71 ++++++++++++++++++++
 4 files changed, 86 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aea42930/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
index 940c599..af34e92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -94,14 +94,6 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
 
   public LocalResourcesTrackerImpl(String user, ApplicationId appId,
       Dispatcher dispatcher, boolean useLocalCacheDirectoryManager,
-      Configuration conf, NMStateStoreService stateStore) {
-    this(user, appId, dispatcher,
-        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>(),
-        useLocalCacheDirectoryManager, conf, stateStore, null);
-  }
-
-  public LocalResourcesTrackerImpl(String user, ApplicationId appId,
-      Dispatcher dispatcher, boolean useLocalCacheDirectoryManager,
       Configuration conf, NMStateStoreService stateStore,
       LocalDirsHandlerService dirHandler) {
     this(user, appId, dispatcher,
@@ -528,4 +520,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
     }
     return mgr;
   }
+
+  @VisibleForTesting
+  LocalDirsHandlerService getDirsHandler() {
+    return dirsHandler;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aea42930/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 28fb53c..663bad7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -306,7 +306,7 @@ public class ResourceLocalizationService extends CompositeService
       trackerState = userResources.getPrivateTrackerState();
       if (!trackerState.isEmpty()) {
         LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-            null, dispatcher, true, super.getConfig(), stateStore);
+            null, dispatcher, true, super.getConfig(), stateStore, dirsHandler);
         LocalResourcesTracker oldTracker = privateRsrc.putIfAbsent(user,
             tracker);
         if (oldTracker != null) {
@@ -322,7 +322,8 @@ public class ResourceLocalizationService extends CompositeService
           ApplicationId appId = appEntry.getKey();
           String appIdStr = appId.toString();
           LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-              appId, dispatcher, false, super.getConfig(), stateStore);
+              appId, dispatcher, false, super.getConfig(), stateStore,
+              dirsHandler);
           LocalResourcesTracker oldTracker = appRsrc.putIfAbsent(appIdStr,
               tracker);
           if (oldTracker != null) {
@@ -460,10 +461,11 @@ public class ResourceLocalizationService extends CompositeService
     // 0) Create application tracking structs
     String userName = app.getUser();
     privateRsrc.putIfAbsent(userName, new LocalResourcesTrackerImpl(userName,
-        null, dispatcher, true, super.getConfig(), stateStore));
+        null, dispatcher, true, super.getConfig(), stateStore, dirsHandler));
     String appIdStr = app.getAppId().toString();
     appRsrc.putIfAbsent(appIdStr, new LocalResourcesTrackerImpl(app.getUser(),
-        app.getAppId(), dispatcher, false, super.getConfig(), stateStore));
+        app.getAppId(), dispatcher, false, super.getConfig(), stateStore,
+        dirsHandler));
     // 1) Signal container init
     //
     // This is handled by the ApplicationImpl state machine and allows

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aea42930/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
index 8d9705b..2874acb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
@@ -529,7 +529,7 @@ public class TestLocalResourcesTrackerImpl {
 
     try {
       LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-          appId, dispatcher, false, conf, stateStore);
+          appId, dispatcher, false, conf, stateStore, null);
       // Container 1 needs lr1 resource
       ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
       LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
@@ -610,7 +610,7 @@ public class TestLocalResourcesTrackerImpl {
 
     try {
       LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-          appId, dispatcher, false, conf, stateStore);
+          appId, dispatcher, false, conf, stateStore, null);
       // Container 1 needs lr1 resource
       ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
       LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
@@ -672,7 +672,7 @@ public class TestLocalResourcesTrackerImpl {
 
     try {
       LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user,
-          appId, dispatcher, false, conf, stateStore);
+          appId, dispatcher, false, conf, stateStore, null);
       // Container 1 needs lr1 resource
       ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
       LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
@@ -725,7 +725,7 @@ public class TestLocalResourcesTrackerImpl {
 
     try {
       LocalResourcesTrackerImpl tracker = new LocalResourcesTrackerImpl(user,
-          appId, dispatcher, true, conf, stateStore);
+          appId, dispatcher, true, conf, stateStore, null);
       LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
           LocalResourceVisibility.PUBLIC);
       Assert.assertNull(tracker.getLocalizedResource(lr1));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aea42930/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 14eea9f..89cbeb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -2820,4 +2820,75 @@ public class TestResourceLocalizationService {
     }
   }
 
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testDirHandler() throws Exception {
+    File f = new File(basedir.toString());
+    String[] sDirs = new String[4];
+    List<Path> localDirs = new ArrayList<Path>(sDirs.length);
+    for (int i = 0; i < 4; ++i) {
+      sDirs[i] = f.getAbsolutePath() + i;
+      localDirs.add(new Path(sDirs[i]));
+    }
+    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
+    LocalizerTracker mockLocalizerTracker = mock(LocalizerTracker.class);
+    DrainDispatcher dispatcher = new DrainDispatcher();
+    dispatcher.init(conf);
+    dispatcher.start();
+    EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
+    dispatcher.register(ApplicationEventType.class, applicationBus);
+    EventHandler<LocalizerEvent> localizerBus = mock(EventHandler.class);
+    dispatcher.register(LocalizerEventType.class, localizerBus);
+
+    ContainerExecutor exec = mock(ContainerExecutor.class);
+    LocalDirsHandlerService mockDirsHandler =
+        mock(LocalDirsHandlerService.class);
+    doReturn(new ArrayList<String>(Arrays.asList(sDirs))).when(
+        mockDirsHandler).getLocalDirsForCleanup();
+    // setup mocks
+    DeletionService delService = mock(DeletionService.class);
+    ResourceLocalizationService rawService =
+        new ResourceLocalizationService(dispatcher, exec, delService,
+            mockDirsHandler, nmContext, metrics);
+    ResourceLocalizationService spyService = spy(rawService);
+    doReturn(mockServer).when(spyService).createServer();
+    doReturn(mockLocalizerTracker).when(spyService).createLocalizerTracker(
+        isA(Configuration.class));
+
+    final String user = "user0";
+    // init application
+    final Application app = mock(Application.class);
+    final ApplicationId appId =
+        BuilderUtils.newApplicationId(314159265358979L, 3);
+    when(app.getUser()).thenReturn(user);
+    when(app.getAppId()).thenReturn(appId);
+    when(app.toString()).thenReturn(appId.toString());
+    try {
+      spyService.init(conf);
+      spyService.start();
+
+      spyService.handle(new ApplicationLocalizationEvent(
+          LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
+      dispatcher.await();
+
+      LocalResourcesTracker appTracker =
+          spyService.getLocalResourcesTracker(
+              LocalResourceVisibility.APPLICATION, user, appId);
+      LocalResourcesTracker privTracker =
+          spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,
+              user, appId);
+      LocalResourcesTracker pubTracker =
+          spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,
+              user, appId);
+      Assert.assertNotNull("dirHandler for appTracker is null!",
+          ((LocalResourcesTrackerImpl)appTracker).getDirsHandler());
+      Assert.assertNotNull("dirHandler for privTracker is null!",
+          ((LocalResourcesTrackerImpl)privTracker).getDirsHandler());
+      Assert.assertNotNull("dirHandler for pubTracker is null!",
+          ((LocalResourcesTrackerImpl)pubTracker).getDirsHandler());
+    } finally {
+      dispatcher.stop();
+      delService.stop();
+    }
+  }
 }
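For downstream code compiling against this change: the shorter LocalResourcesTrackerImpl constructor is removed above, so every caller now supplies the LocalDirsHandlerService explicitly (the tests pass null). A minimal before/after sketch follows; the variable names are assumed from the surrounding hunks.

    // Before this patch (constructor removed above):
    //   new LocalResourcesTrackerImpl(user, appId, dispatcher, false, conf, stateStore);
    // After: pass the dirs handler so the tracker can consult disk health.
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(
        user, appId, dispatcher, false, conf, stateStore, dirsHandler);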




[16/50] [abbrv] hadoop git commit: HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fails due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

Posted by xg...@apache.org.
HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fails due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91d6fe15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91d6fe15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91d6fe15

Branch: refs/heads/YARN-5734
Commit: 91d6fe151f2e3de21b0a9423ade921e771957d90
Parents: 62857be
Author: Lei Xu <le...@apache.org>
Authored: Tue May 30 11:09:03 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Tue May 30 11:10:12 2017 -0700

----------------------------------------------------------------------
 .../datanode/TestDataNodeHotSwapVolumes.java    | 34 ++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91d6fe15/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 5aec174..b308ca9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -97,6 +97,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -111,7 +112,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
       int storagePerDataNode) throws IOException {
     shutdown();
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
     /*
@@ -756,7 +757,7 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
       throws InterruptedException, TimeoutException, ReconfigurationException,
       IOException, BrokenBarrierException {
@@ -848,6 +849,9 @@ public class TestDataNodeHotSwapVolumes {
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with the pipeline recovery.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -857,6 +861,32 @@ public class TestDataNodeHotSwapVolumes {
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
+
+    // Write more files to make sure that the DataNode that has removed volume
+    // is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 blocks",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)




[46/50] [abbrv] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

Posted by xg...@apache.org.
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2713c0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2713c0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2713c0d

Branch: refs/heads/YARN-5734
Commit: f2713c0d2d63de29783db6ac0e03f3f762d39ae4
Parents: 8355609
Author: Jonathan Hung <jh...@linkedin.com>
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Jun 5 14:03:14 2017 -0700

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java   | 36 +++++------
 .../CapacitySchedulerConfiguration.java         | 10 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++++++++++++++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 ++++++++++++++++++++
 .../scheduler/capacity/conf/package-info.java   | 29 +++++++++
 .../capacity/TestCapacityScheduler.java         |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2713c0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d3186da..a925904 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -104,6 +103,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -162,6 +163,7 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private CSConfigurationProvider csConfProvider;
 
   @Override
   public void setConf(Configuration conf) {
@@ -285,7 +287,18 @@ public class CapacityScheduler extends
       IOException {
     try {
       writeLock.lock();
-      this.conf = loadCapacitySchedulerConfiguration(configuration);
+      String confProviderStr = configuration.get(
+          CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+          CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+      if (confProviderStr.equals(
+          CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+        this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+      } else {
+        throw new IOException("Invalid CS configuration provider: " +
+            confProviderStr);
+      }
+      this.csConfProvider.init(configuration);
+      this.conf = this.csConfProvider.loadConfiguration(configuration);
       validateConf(this.conf);
       this.minimumAllocation = this.conf.getMinimumAllocation();
       initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -392,7 +405,7 @@ public class CapacityScheduler extends
       writeLock.lock();
       Configuration configuration = new Configuration(newConf);
       CapacitySchedulerConfiguration oldConf = this.conf;
-      this.conf = loadCapacitySchedulerConfiguration(configuration);
+      this.conf = csConfProvider.loadConfiguration(configuration);
       validateConf(this.conf);
       try {
         LOG.info("Re-initializing queues...");
@@ -1754,23 +1767,6 @@ public class CapacityScheduler extends
     return true;
   }
 
-  private CapacitySchedulerConfiguration loadCapacitySchedulerConfiguration(
-      Configuration configuration) throws IOException {
-    try {
-      InputStream CSInputStream =
-          this.rmContext.getConfigurationProvider()
-              .getConfigurationInputStream(configuration,
-                  YarnConfiguration.CS_CONFIGURATION_FILE);
-      if (CSInputStream != null) {
-        configuration.addResource(CSInputStream);
-        return new CapacitySchedulerConfiguration(configuration, false);
-      }
-      return new CapacitySchedulerConfiguration(configuration, true);
-    } catch (Exception e) {
-      throw new IOException(e);
-    }
-  }
-
   private String getDefaultReservationQueueName(String planQueueName) {
     return planQueueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2713c0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index c3c9585..6dd7aef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -292,6 +292,16 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   @Private
   public static final boolean DEFAULT_LAZY_PREEMPTION_ENABLED = false;
 
+  @Private
+  public static final String CS_CONF_PROVIDER = PREFIX
+      + "configuration.provider";
+
+  @Private
+  public static final String FILE_CS_CONF_PROVIDER = "file";
+
+  @Private
+  public static final String DEFAULT_CS_CONF_PROVIDER = FILE_CS_CONF_PROVIDER;
+
   AppPriorityACLConfigurationParser priorityACLConfig = new AppPriorityACLConfigurationParser();
 
   public CapacitySchedulerConfiguration() {
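
For illustration, a minimal sketch (hypothetical class name, not part of this
patch) of opting into the new provider key from client code; the default is
already the file-based provider, so setting it here is purely explicit:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

  public class CsConfProviderSelectionSketch {
    public static void main(String[] args) {
      Configuration conf = new YarnConfiguration();
      // Explicitly select the file-based provider (also the default).
      conf.set(CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
          CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER);
      // With this patch, CapacityScheduler#initScheduler reads this key and
      // fails fast with an IOException for any unrecognized provider value.
    }
  }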

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2713c0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
new file mode 100644
index 0000000..c9984ac
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+
+import java.io.IOException;
+
+/**
+ * Configuration provider for {@link CapacityScheduler}.
+ */
+public interface CSConfigurationProvider {
+
+  /**
+   * Initialize the configuration provider with given conf.
+   * @param conf configuration to initialize with
+   */
+  void init(Configuration conf);
+
+  /**
+   * Loads capacity scheduler configuration object.
+   * @param conf initial bootstrap configuration
+   * @return CS configuration
+   * @throws IOException if retrieving the configuration fails
+   */
+  CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
+      throws IOException;
+}
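
As a hedged sketch of what a non-file implementation of this contract could
look like (the class below is hypothetical and not part of this patch):

  package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

  /** Hypothetical provider that never consults capacity-scheduler.xml. */
  public class StaticCSConfigurationProvider implements CSConfigurationProvider {

    @Override
    public void init(Configuration conf) {
      // Nothing to open here; a real provider might connect to its store.
    }

    @Override
    public CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
        throws IOException {
      // Passing false mirrors the "resource already present" path of the
      // file-based provider below, so the local capacity-scheduler.xml is
      // not pulled in as a default resource.
      return new CapacitySchedulerConfiguration(conf, false);
    }
  }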

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2713c0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
new file mode 100644
index 0000000..51c64fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * {@link CapacityScheduler} configuration provider based on local
+ * {@code capacity-scheduler.xml} file.
+ */
+public class FileBasedCSConfigurationProvider implements
+    CSConfigurationProvider {
+
+  private RMContext rmContext;
+
+  /**
+   * Construct file based CS configuration provider with given context.
+   * @param rmContext the RM context
+   */
+  public FileBasedCSConfigurationProvider(RMContext rmContext) {
+    this.rmContext = rmContext;
+  }
+
+  @Override
+  public void init(Configuration conf) {}
+
+  @Override
+  public CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
+      throws IOException {
+    try {
+      InputStream csInputStream =
+          this.rmContext.getConfigurationProvider()
+              .getConfigurationInputStream(conf,
+                  YarnConfiguration.CS_CONFIGURATION_FILE);
+      if (csInputStream != null) {
+        conf.addResource(csInputStream);
+        return new CapacitySchedulerConfiguration(conf, false);
+      }
+      return new CapacitySchedulerConfiguration(conf, true);
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
+}
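
Putting the pieces together, the following sketch mirrors the call sequence
CapacityScheduler#initScheduler now performs, assuming an RMContext is at
hand (e.g. inside the RM); the wrapper class is hypothetical:

  package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

  final class ProviderUsageSketch {
    static CapacitySchedulerConfiguration load(RMContext rmContext,
        Configuration conf) throws IOException {
      CSConfigurationProvider provider =
          new FileBasedCSConfigurationProvider(rmContext);
      provider.init(conf);  // a no-op for the file-based provider
      return provider.loadConfiguration(conf);
    }
  }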

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2713c0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
new file mode 100644
index 0000000..08d0522
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package
+ * org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf
+ * contains classes related to capacity scheduler configuration management.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2713c0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index bf1f6eb..5de6c6d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -231,13 +231,13 @@ public class TestCapacityScheduler {
 
   @Test (timeout = 30000)
   public void testConfValidation() throws Exception {
-    ResourceScheduler scheduler = new CapacityScheduler();
+    CapacityScheduler scheduler = new CapacityScheduler();
     scheduler.setRMContext(resourceManager.getRMContext());
     Configuration conf = new YarnConfiguration();
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
     try {
-      scheduler.reinitialize(conf, mockContext);
+      scheduler.init(conf);
       fail("Exception is expected because the min memory allocation is" +
         " larger than the max memory allocation.");
     } catch (YarnRuntimeException e) {




[04/50] [abbrv] hadoop git commit: Fix NPE in LazyPersistFileScrubber. Contributed by Inigo Goiri.

Posted by xg...@apache.org.
Fix NPE in LazyPersistFileScrubber. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/303c8dc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/303c8dc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/303c8dc9

Branch: refs/heads/YARN-5734
Commit: 303c8dc9b6c853c0939ea9ba14388897cc258071
Parents: d81372d
Author: Inigo Goiri <el...@gmail.com>
Authored: Fri May 26 13:15:44 2017 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri May 26 13:16:01 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/303c8dc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 11b62d9..997fd92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3870,9 +3870,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         while (it.hasNext()) {
           Block b = it.next();
           BlockInfo blockInfo = blockManager.getStoredBlock(b);
-          BlockCollection bc = getBlockCollection(blockInfo);
-          if (bc.getStoragePolicyID() == lpPolicy.getId()) {
-            filesToDelete.add(bc);
+          if (blockInfo == null) {
+            LOG.info("Cannot find block info for block " + b);
+          } else {
+            BlockCollection bc = getBlockCollection(blockInfo);
+            if (bc.getStoragePolicyID() == lpPolicy.getId()) {
+              filesToDelete.add(bc);
+            }
           }
         }
 




[09/50] [abbrv] hadoop git commit: HDFS-11446. TestMaintenanceState#testWithNNAndDNRestart fails intermittently. Contributed by Yiqun Lin.

Posted by xg...@apache.org.
HDFS-11446. TestMaintenanceState#testWithNNAndDNRestart fails intermittently. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31058b24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31058b24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31058b24

Branch: refs/heads/YARN-5734
Commit: 31058b243e9575d90f038bb2fdf5a556710f6f7f
Parents: 89bb8bf
Author: Yiqun Lin <yq...@apache.org>
Authored: Sun May 28 11:23:32 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Sun May 28 11:23:32 2017 +0800

----------------------------------------------------------------------
 .../hadoop/hdfs/TestMaintenanceState.java       | 128 ++++++++++---------
 1 file changed, 66 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31058b24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index a37bdb8..b49fba0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -30,12 +29,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -48,8 +42,16 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
 
 /**
  * This class tests node maintenance.
@@ -125,8 +127,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // When node is in ENTERING_MAINTENANCE state, it can still serve read
     // requests
-    assertNull(checkWithRetry(ns, fileSys, file, replicas, null,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas, null,
+        nodeOutofService);
 
     putNodeInService(0, nodeOutofService.getDatanodeUuid());
 
@@ -387,8 +389,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // The block should be replicated to another datanode to meet
     // expected replication count.
-    assertNull(checkWithRetry(ns, fileSys, file, expectedReplicasInRead,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, expectedReplicasInRead,
+        nodeOutofService);
 
     cleanupFile(fileSys, file);
     teardown();
@@ -548,19 +550,19 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         client.datanodeReport(DatanodeReportType.LIVE).length);
 
     // test 1, verify the replica in IN_MAINTENANCE state isn't in LocatedBlock
-    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas - 1,
+        nodeOutofService);
 
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), 0, null,
         AdminStates.DECOMMISSIONED);
 
     // test 2 after decommission has completed, the replication count is
     // replicas + 1 which includes the decommissioned node.
-    assertNull(checkWithRetry(ns, fileSys, file, replicas + 1, null));
+    checkWithRetry(ns, fileSys, file, replicas + 1, null);
 
     // test 3, put the node in service, replication count should restore.
     putNodeInService(0, nodeOutofService.getDatanodeUuid());
-    assertNull(checkWithRetry(ns, fileSys, file, replicas, null));
+    checkWithRetry(ns, fileSys, file, replicas, null);
 
     cleanupFile(fileSys, file);
   }
@@ -587,8 +589,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), Long.MAX_VALUE,
         null, AdminStates.IN_MAINTENANCE);
 
-    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas - 1,
+        nodeOutofService);
 
     cleanupFile(fileSys, file);
   }
@@ -631,10 +633,10 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     takeNodeOutofService(0, decommissionDNUuid, 0, null, maintenanceNodes,
         AdminStates.DECOMMISSIONED);
     // Out of the replicas returned, one is the decommissioned node.
-    assertNull(checkWithRetry(ns, fileSys, file, repl, maintenanceDN));
+    checkWithRetry(ns, fileSys, file, repl, maintenanceDN);
 
     putNodeInService(0, maintenanceDN);
-    assertNull(checkWithRetry(ns, fileSys, file, repl + 1, null));
+    checkWithRetry(ns, fileSys, file, repl + 1, null);
 
     cleanupFile(fileSys, file);
     teardown();
@@ -663,7 +665,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         AdminStates.IN_MAINTENANCE);
 
     // Verify file replication matches maintenance state min replication
-    assertNull(checkWithRetry(ns, fileSys, file, 1, null, nodes[0]));
+    checkWithRetry(ns, fileSys, file, 1, null, nodes[0]);
 
     // Put the maintenance nodes back in service
     for (DatanodeInfo datanodeInfo : maintenanceDN) {
@@ -671,7 +673,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     }
 
     // Verify file replication catching up to the old state
-    assertNull(checkWithRetry(ns, fileSys, file, repl, null));
+    checkWithRetry(ns, fileSys, file, repl, null);
 
     cleanupFile(fileSys, file);
   }
@@ -720,19 +722,19 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
 
     // Verify that the nodeOutofService remains in blocksMap and
     // # of live replicas for read operation is expected.
-    assertNull(checkWithRetry(ns, fileSys, file, oldFactor - 1,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, oldFactor - 1,
+        nodeOutofService);
 
     final DFSClient client = getDfsClient(0);
     client.setReplication(file.toString(), (short)newFactor);
 
     // Verify that the nodeOutofService remains in blocksMap and
     // # of live replicas for read operation.
-    assertNull(checkWithRetry(ns, fileSys, file, expectedLiveReplicas,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, expectedLiveReplicas,
+        nodeOutofService);
 
     putNodeInService(0, nodeOutofService.getDatanodeUuid());
-    assertNull(checkWithRetry(ns, fileSys, file, newFactor, null));
+    checkWithRetry(ns, fileSys, file, newFactor, null);
 
     cleanupFile(fileSys, file);
     teardown();
@@ -765,8 +767,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         getFirstBlockFirstReplicaUuid(fileSys, file), Long.MAX_VALUE, null,
         AdminStates.IN_MAINTENANCE);
 
-    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas - 1,
+        nodeOutofService);
 
     final DFSClient client = getDfsClient(0);
     assertEquals("All datanodes must be alive", numDatanodes,
@@ -779,16 +781,16 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         client.datanodeReport(DatanodeReportType.LIVE).length);
 
     // Dead maintenance node's blocks should remain in block map.
-    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas - 1,
+        nodeOutofService);
 
     // When a dead maintenance node is transitioned out of maintenance mode,
     // its blocks should be removed from block map.
     // This will then trigger replication to restore the live replicas back
     // to replication factor.
     putNodeInService(0, nodeOutofService.getDatanodeUuid());
-    assertNull(checkWithRetry(ns, fileSys, file, replicas, nodeOutofService,
-        null));
+    checkWithRetry(ns, fileSys, file, replicas, nodeOutofService,
+        null);
 
     cleanupFile(fileSys, file);
   }
@@ -821,8 +823,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         getFirstBlockFirstReplicaUuid(fileSys, file), Long.MAX_VALUE, null,
         AdminStates.IN_MAINTENANCE);
 
-    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas - 1,
+        nodeOutofService);
 
     DFSClient client = getDfsClient(0);
     assertEquals("All datanodes must be alive", numDatanodes,
@@ -836,23 +838,23 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         client.datanodeReport(DatanodeReportType.LIVE).length);
 
     // Dead maintenance node's blocks should remain in block map.
-    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-        nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas - 1,
+        nodeOutofService);
 
     // restart nn, nn will restore 3 live replicas given it doesn't
     // know the maintenance node has the replica.
     getCluster().restartNameNode(0);
     ns = getCluster().getNamesystem(0);
-    assertNull(checkWithRetry(ns, fileSys, file, replicas, null));
+    checkWithRetry(ns, fileSys, file, replicas, null);
 
     // restart dn, nn has 1 maintenance replica and 3 live replicas.
     getCluster().restartDataNode(dnProp, true);
     getCluster().waitActive();
-    assertNull(checkWithRetry(ns, fileSys, file, replicas, nodeOutofService));
+    checkWithRetry(ns, fileSys, file, replicas, nodeOutofService);
 
     // Put the node in service, a redundant replica should be removed.
     putNodeInService(0, nodeOutofService.getDatanodeUuid());
-    assertNull(checkWithRetry(ns, fileSys, file, replicas, null));
+    checkWithRetry(ns, fileSys, file, replicas, null);
 
     cleanupFile(fileSys, file);
   }
@@ -878,12 +880,12 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     writeFile(fileSys, file, replicas, 2);
 
     // Verify nodeOutofService wasn't chosen for write operation.
-    assertNull(checkWithRetry(ns, fileSys, file, replicas - 1,
-        nodeOutofService, null));
+    checkWithRetry(ns, fileSys, file, replicas - 1,
+        nodeOutofService, null);
 
     // Put the node back to service, live replicas should be restored.
     putNodeInService(0, nodeOutofService.getDatanodeUuid());
-    assertNull(checkWithRetry(ns, fileSys, file, replicas, null));
+    checkWithRetry(ns, fileSys, file, replicas, null);
 
     cleanupFile(fileSys, file);
   }
@@ -934,12 +936,12 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     client.setReplication(file.toString(), (short) 1);
 
     // Verify the nodeOutofService remains in blocksMap.
-    assertNull(checkWithRetry(ns, fileSys, file, 1, nodeOutofService));
+    checkWithRetry(ns, fileSys, file, 1, nodeOutofService);
 
     // Restart NN and verify the nodeOutofService remains in blocksMap.
     getCluster().restartNameNode(0);
     ns = getCluster().getNamesystem(0);
-    assertNull(checkWithRetry(ns, fileSys, file, 1, nodeOutofService));
+    checkWithRetry(ns, fileSys, file, 1, nodeOutofService);
 
     cleanupFile(fileSys, file);
   }
@@ -1081,30 +1083,32 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     return null;
   }
 
-  static String checkWithRetry(FSNamesystem ns, FileSystem fileSys,
-      Path name, int repl, DatanodeInfo inMaintenanceNode)
-          throws IOException {
-    return checkWithRetry(ns, fileSys, name, repl, inMaintenanceNode,
+  static void checkWithRetry(FSNamesystem ns, FileSystem fileSys, Path name,
+      int repl, DatanodeInfo inMaintenanceNode) {
+    checkWithRetry(ns, fileSys, name, repl, inMaintenanceNode,
         inMaintenanceNode);
   }
 
-  static String checkWithRetry(FSNamesystem ns, FileSystem fileSys,
-      Path name, int repl, DatanodeInfo excludedNode,
-      DatanodeInfo underMaintenanceNode) throws IOException {
-    int tries = 0;
-    String output = null;
-    while (tries++ < 200) {
-      try {
-        Thread.sleep(100);
-        output = checkFile(ns, fileSys, name, repl, excludedNode,
-            underMaintenanceNode);
-        if (output == null) {
-          break;
+  static void checkWithRetry(final FSNamesystem ns, final FileSystem fileSys,
+      final Path name, final int repl, final DatanodeInfo excludedNode,
+      final DatanodeInfo underMaintenanceNode) {
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+
+        @Override
+        public Boolean get() {
+          String output = null;
+          try {
+            output = checkFile(ns, fileSys, name, repl, excludedNode,
+                underMaintenanceNode);
+          } catch (Exception ignored) {
+          }
+
+          return (output == null);
         }
-      } catch (InterruptedException ie) {
-      }
+      }, 100, 60000);
+    } catch (Exception ignored) {
     }
-    return output;
   }
 
   static private DatanodeInfo[] getFirstBlockReplicasDatanodeInfos(
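
The refactor above swaps the hand-rolled sleep/retry loop for
GenericTestUtils.waitFor at the same cadence (poll every 100 ms, time out
after 60 s). A self-contained sketch of the pattern, with a stand-in
condition where the real test re-runs checkFile():

  import java.util.concurrent.atomic.AtomicInteger;

  import org.apache.hadoop.test.GenericTestUtils;

  import com.google.common.base.Supplier;

  public class WaitForSketch {
    public static void main(String[] args) throws Exception {
      final AtomicInteger attempts = new AtomicInteger();
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          // Stand-in condition; flips to true on the third poll.
          return attempts.incrementAndGet() >= 3;
        }
      }, 100, 60000);  // check every 100 ms, give up after 60 s
    }
  }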




[47/50] [abbrv] hadoop git commit: YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung

Posted by xg...@apache.org.
YARN-5946: Create YarnConfigurationStore interface and
InMemoryConfigurationStore class. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae9fc256
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae9fc256
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae9fc256

Branch: refs/heads/YARN-5734
Commit: ae9fc256634c40542c9ab543299987bc243c0cbe
Parents: f2713c0
Author: Xuan <xg...@apache.org>
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Jun 5 14:03:15 2017 -0700

----------------------------------------------------------------------
 .../conf/InMemoryConfigurationStore.java        |  86 +++++++++++
 .../capacity/conf/YarnConfigurationStore.java   | 154 +++++++++++++++++++
 .../conf/TestYarnConfigurationStore.java        |  70 +++++++++
 3 files changed, 310 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae9fc256/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 0000000..a208fb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+  private Configuration schedConf;
+  private LinkedList<LogMutation> pendingMutations;
+  private long pendingId;
+
+  @Override
+  public void initialize(Configuration conf, Configuration schedConf) {
+    this.schedConf = schedConf;
+    this.pendingMutations = new LinkedList<>();
+    this.pendingId = 0;
+  }
+
+  @Override
+  public synchronized long logMutation(LogMutation logMutation) {
+    logMutation.setId(++pendingId);
+    pendingMutations.add(logMutation);
+    return pendingId;
+  }
+
+  @Override
+  public synchronized boolean confirmMutation(long id, boolean isValid) {
+    LogMutation mutation = pendingMutations.poll();
+    // If confirmMutation is called out of order, discard mutations until id
+    // is reached.
+    while (mutation != null) {
+      if (mutation.getId() == id) {
+        if (isValid) {
+          Map<String, String> mutations = mutation.getUpdates();
+          for (Map.Entry<String, String> kv : mutations.entrySet()) {
+            schedConf.set(kv.getKey(), kv.getValue());
+          }
+        }
+        return true;
+      }
+      mutation = pendingMutations.poll();
+    }
+    return false;
+  }
+
+  @Override
+  public synchronized Configuration retrieve() {
+    return schedConf;
+  }
+
+  @Override
+  public synchronized List<LogMutation> getPendingMutations() {
+    return pendingMutations;
+  }
+
+  @Override
+  public List<LogMutation> getConfirmedConfHistory(long fromId) {
+    // Unimplemented.
+    return null;
+  }
+}
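
One behavior worth spelling out from confirmMutation above: confirming a
later id silently discards earlier, still-pending mutations. A small sketch
against the in-memory store (keys, values, and user are made up):

  package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;

  import java.util.Collections;

  import org.apache.hadoop.conf.Configuration;

  final class OutOfOrderConfirmSketch {
    public static void main(String[] args) {
      YarnConfigurationStore store = new InMemoryConfigurationStore();
      store.initialize(new Configuration(), new Configuration(false));

      long id1 = store.logMutation(new YarnConfigurationStore.LogMutation(
          Collections.singletonMap("k1", "v1"), "someUser"));
      long id2 = store.logMutation(new YarnConfigurationStore.LogMutation(
          Collections.singletonMap("k2", "v2"), "someUser"));

      // Confirming id2 first drops the still-pending mutation id1 entirely.
      store.confirmMutation(id2, true);
      // store.retrieve().get("k2") is "v2"; get("k1") stays null -- the
      // discarded mutation id1 is never applied.
    }
  }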

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae9fc256/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
new file mode 100644
index 0000000..22c0ef8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * YarnConfigurationStore exposes the methods needed for retrieving and
+ * persisting the {@link CapacityScheduler} configuration as key-value pairs,
+ * using write-ahead logging. When a configuration mutation is requested, the
+ * caller should first log it with {@code logMutation}, which persists this
+ * pending mutation. The mutation is merged into the persisted configuration
+ * only after {@code confirmMutation} is called.
+ *
+ * On startup/recovery, the caller should call {@code retrieve} to get the
+ * configuration with all confirmed mutations applied, then fetch the pending
+ * mutations that were never confirmed via {@code getPendingMutations}, and
+ * replay/confirm them via {@code confirmMutation} as in the normal case.
+ */
+public interface YarnConfigurationStore {
+
+  /**
+   * LogMutation encapsulates the fields needed for configuration mutation
+   * audit logging and recovery.
+   */
+  class LogMutation {
+    private Map<String, String> updates;
+    private String user;
+    private long id;
+
+    /**
+     * Create log mutation prior to logging.
+     * @param updates key-value configuration updates
+     * @param user user who requested configuration change
+     */
+    public LogMutation(Map<String, String> updates, String user) {
+      this(updates, user, 0);
+    }
+
+    /**
+     * Create log mutation for recovery.
+     * @param updates key-value configuration updates
+     * @param user user who requested configuration change
+     * @param id transaction id of configuration change
+     */
+    LogMutation(Map<String, String> updates, String user, long id) {
+      this.updates = updates;
+      this.user = user;
+      this.id = id;
+    }
+
+    /**
+     * Get key-value configuration updates.
+     * @return map of configuration updates
+     */
+    public Map<String, String> getUpdates() {
+      return updates;
+    }
+
+    /**
+     * Get user who requested configuration change.
+     * @return user who requested configuration change
+     */
+    public String getUser() {
+      return user;
+    }
+
+    /**
+     * Get transaction id of this configuration change.
+     * @return transaction id
+     */
+    public long getId() {
+      return id;
+    }
+
+    /**
+     * Set transaction id of this configuration change.
+     * @param id transaction id
+     */
+    public void setId(long id) {
+      this.id = id;
+    }
+  }
+
+  /**
+   * Initialize the configuration store.
+   * @param conf configuration to initialize store with
+   * @param schedConf Initial key-value configuration to persist
+   */
+  void initialize(Configuration conf, Configuration schedConf);
+
+  /**
+   * Logs the configuration change to backing store. Generates an id associated
+   * with this mutation, sets it in {@code logMutation}, and returns it.
+   * @param logMutation configuration change to be persisted in write ahead log
+   * @return id which configuration store associates with this mutation
+   */
+  long logMutation(LogMutation logMutation);
+
+  /**
+   * Should be called after {@code logMutation}. Gets the pending mutation
+   * associated with {@code id} and marks the mutation as persisted (no longer
+   * pending). If isValid is true, merge the mutation with the persisted
+   * configuration.
+   *
+   * If {@code confirmMutation} is called with ids in a different order than
+   * was returned by {@code logMutation}, the result is implementation
+   * dependent.
+   * @param id id of mutation to be confirmed
+   * @param isValid if true, update persisted configuration with mutation
+   *                associated with {@code id}.
+   * @return true on success
+   */
+  boolean confirmMutation(long id, boolean isValid);
+
+  /**
+   * Retrieve the persisted configuration.
+   * @return configuration as key-value
+   */
+  Configuration retrieve();
+
+  /**
+   * Get the list of pending mutations, in the order they were logged.
+   * @return list of mutations
+   */
+  List<LogMutation> getPendingMutations();
+
+  /**
+   * Get a list of confirmed configuration mutations starting from a given id.
+   * @param fromId id from which to start getting mutations, inclusive
+   * @return list of configuration mutations
+   */
+  List<LogMutation> getConfirmedConfHistory(long fromId);
+}
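
To make the recovery contract in the javadoc concrete, a minimal sketch of
the startup replay loop; validate() is a hypothetical hook standing in for
whatever re-validation the caller performs:

  package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.conf.Configuration;

  final class RecoveryReplaySketch {
    static Configuration recover(YarnConfigurationStore store) {
      // Copy first: the in-memory implementation drains the same list
      // inside confirmMutation.
      List<YarnConfigurationStore.LogMutation> pending =
          new ArrayList<>(store.getPendingMutations());
      for (YarnConfigurationStore.LogMutation mutation : pending) {
        store.confirmMutation(mutation.getId(), validate(mutation));
      }
      return store.retrieve();  // confirmed configuration, replay applied
    }

    private static boolean validate(YarnConfigurationStore.LogMutation m) {
      return true;  // placeholder for real validation of m.getUpdates()
    }
  }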

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae9fc256/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
new file mode 100644
index 0000000..dff4e77
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+public class TestYarnConfigurationStore {
+
+  private YarnConfigurationStore confStore;
+  private Configuration schedConf;
+
+  private static final String testUser = "testUser";
+
+  @Before
+  public void setUp() {
+    schedConf = new Configuration(false);
+    schedConf.set("key1", "val1");
+  }
+
+  @Test
+  public void testInMemoryConfigurationStore() {
+    confStore = new InMemoryConfigurationStore();
+    confStore.initialize(new Configuration(), schedConf);
+    assertEquals("val1", confStore.retrieve().get("key1"));
+
+    Map<String, String> update1 = new HashMap<>();
+    update1.put("keyUpdate1", "valUpdate1");
+    LogMutation mutation1 = new LogMutation(update1, testUser);
+    long id = confStore.logMutation(mutation1);
+    assertEquals(1, confStore.getPendingMutations().size());
+    confStore.confirmMutation(id, true);
+    assertEquals("valUpdate1", confStore.retrieve().get("keyUpdate1"));
+    assertEquals(0, confStore.getPendingMutations().size());
+
+    Map<String, String> update2 = new HashMap<>();
+    update2.put("keyUpdate2", "valUpdate2");
+    LogMutation mutation2 = new LogMutation(update2, testUser);
+    id = confStore.logMutation(mutation2);
+    assertEquals(1, confStore.getPendingMutations().size());
+    confStore.confirmMutation(id, false);
+    assertNull("Configuration should not be updated",
+        confStore.retrieve().get("keyUpdate2"));
+    assertEquals(0, confStore.getPendingMutations().size());
+  }
+}




[23/50] [abbrv] hadoop git commit: YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object decoding ever fails. Contributed by Jon Eagles.

Posted by xg...@apache.org.
YARN-6649. RollingLevelDBTimelineServer throws RuntimeException if object decoding ever fails. Contributed by Jon Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4369690c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4369690c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4369690c

Branch: refs/heads/YARN-5734
Commit: 4369690ce63566131aee28696bf2683a3cb20205
Parents: 1543d0f
Author: Nathan Roberts <nr...@apache.org>
Authored: Tue May 30 16:10:33 2017 -0500
Committer: Nathan Roberts <nr...@apache.org>
Committed: Wed May 31 11:32:32 2017 -0500

----------------------------------------------------------------------
 .../timeline/RollingLevelDBTimelineStore.java   | 29 ++++++++++++++++----
 1 file changed, 23 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4369690c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 20e0379..d139346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -473,9 +473,16 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         }
       } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
         if (otherInfo) {
-          entity.addOtherInfo(
-              parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
-              fstConf.asObject(iterator.peekNext().getValue()));
+          Object o = null;
+          String keyStr = parseRemainingKey(key,
+              prefixlen + OTHER_INFO_COLUMN.length);
+          try {
+            o = fstConf.asObject(iterator.peekNext().getValue());
+            entity.addOtherInfo(keyStr, o);
+          } catch (Exception e) {
+            LOG.warn("Error while decoding "
+                + entityId + ":otherInfo:" + keyStr, e);
+          }
         }
       } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
         if (relatedEntities) {
@@ -1338,7 +1345,12 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       TimelineEvent event = new TimelineEvent();
       event.setTimestamp(ts);
       event.setEventType(tstype);
-      Object o = fstConf.asObject(value);
+      Object o = null;
+      try {
+        o = fstConf.asObject(value);
+      } catch (Exception e) {
+        LOG.warn("Error while decoding " + tstype, e);
+      }
       if (o == null) {
         event.setEventInfo(null);
       } else if (o instanceof Map) {
@@ -1362,8 +1374,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
     KeyParser kp = new KeyParser(key, offset);
     String name = kp.getNextString();
     byte[] bytes = kp.getRemainingBytes();
-    Object value = fstConf.asObject(bytes);
-    entity.addPrimaryFilter(name, value);
+    Object value = null;
+    try {
+      value = fstConf.asObject(bytes);
+      entity.addPrimaryFilter(name, value);
+    } catch (Exception e) {
+      LOG.warn("Error while decoding " + name, e);
+    }
   }
 
   /**

