Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/03/13 00:04:20 UTC

[01/49] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 821b68d05 -> fb1b59600


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
deleted file mode 100644
index 935be61..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-
-import com.google.inject.Inject;
-
-class AppsBlock extends HtmlBlock {
-  final ConcurrentMap<ApplicationId, RMApp> apps;
-  private final Configuration conf;
-  final ResourceManager rm;
-  @Inject
-  AppsBlock(ResourceManager rm, ViewContext ctx, Configuration conf) {
-    super(ctx);
-    apps = rm.getRMContext().getRMApps();
-    this.conf = conf;
-    this.rm = rm;
-  }
-
-  @Override public void render(Block html) {
-    TBODY<TABLE<Hamlet>> tbody = html.
-      table("#apps").
-        thead().
-          tr().
-            th(".id", "ID").
-            th(".user", "User").
-            th(".name", "Name").
-            th(".type", "Application Type").
-            th(".queue", "Queue").
-            th(".starttime", "StartTime").
-            th(".finishtime", "FinishTime").
-            th(".state", "YarnApplicationState").
-            th(".finalstatus", "FinalStatus").
-            th(".progress", "Progress").
-            th(".ui", "Tracking UI")._()._().
-        tbody();
-    Collection<YarnApplicationState> reqAppStates = null;
-    String reqStateString = $(APP_STATE);
-    if (reqStateString != null && !reqStateString.isEmpty()) {
-      String[] appStateStrings = reqStateString.split(",");
-      reqAppStates = new HashSet<YarnApplicationState>(appStateStrings.length);
-      for(String stateString : appStateStrings) {
-        reqAppStates.add(YarnApplicationState.valueOf(stateString));
-      }
-    }
-    StringBuilder appsTableData = new StringBuilder("[\n");
-    for (RMApp app : apps.values()) {
-      if (reqAppStates != null && !reqAppStates.contains(app.createApplicationState())) {
-        continue;
-      }
-      AppInfo appInfo = new AppInfo(rm, app, true, WebAppUtils.getHttpSchemePrefix(conf));
-      String percent = String.format("%.1f", appInfo.getProgress());
-      //AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js
-      appsTableData.append("[\"<a href='")
-      .append(url("app", appInfo.getAppId())).append("'>")
-      .append(appInfo.getAppId()).append("</a>\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getUser()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getName()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getApplicationType()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getQueue()))).append("\",\"")
-      .append(appInfo.getStartTime()).append("\",\"")
-      .append(appInfo.getFinishTime()).append("\",\"")
-      .append(appInfo.getState()).append("\",\"")
-      .append(appInfo.getFinalStatus() == FinalApplicationStatus.UNDEFINED ?
-          "N/A" : appInfo.getFinalStatus()).append("\",\"")
-      // Progress bar
-      .append("<br title='").append(percent)
-      .append("'> <div class='").append(C_PROGRESSBAR).append("' title='")
-      .append(join(percent, '%')).append("'> ").append("<div class='")
-      .append(C_PROGRESSBAR_VALUE).append("' style='")
-      .append(join("width:", percent, '%')).append("'> </div> </div>")
-      .append("\",\"<a href='");
-
-      String trackingURL =
-        !appInfo.isTrackingUrlReady()? "#" : appInfo.getTrackingUrlPretty();
-      
-      appsTableData.append(trackingURL).append("'>")
-      .append(appInfo.getTrackingUI()).append("</a>\"],\n");
-
-    }
-    if(appsTableData.charAt(appsTableData.length() - 2) == ',') {
-      appsTableData.delete(appsTableData.length()-2, appsTableData.length()-1);
-    }
-    appsTableData.append("]");
-    html.script().$type("text/javascript").
-    _("var appsTableData=" + appsTableData)._();
-
-    tbody._()._();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
index 6d461f6..cb0836a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 83df72b..028bb31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerLeafQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
new file mode 100644
index 0000000..b8cd1ad
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import org.apache.hadoop.yarn.server.webapp.ContainerBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+
+public class ContainerPage extends RmView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String containerId = $(YarnWebParams.CONTAINER_ID);
+    set(TITLE, containerId.isEmpty() ? "Bad request: missing container ID"
+        : join("Container ", $(YarnWebParams.CONTAINER_ID)));
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return ContainerBlock.class;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
index e05987b..1c8828c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
index 8c54f4e..97ab872 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerLeafQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerQueueInfo;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -234,6 +235,11 @@ public class FairSchedulerPage extends RmView {
     return QueuesBlock.class;
   }
 
+  @Override
+  protected String initAppsTable() {
+    return WebPageUtils.appsTableInit(true);
+  }
+
   static String percent(float f) {
     return String.format("%.1f%%", f * 100);
   }
@@ -245,19 +251,4 @@ public class FairSchedulerPage extends RmView {
   static String left(float f) {
     return String.format("left:%.1f%%", f * 100);
   }
-  
-  @Override
-  protected String getAppsTableColumnDefs() {
-    StringBuilder sb = new StringBuilder();
-    return sb
-      .append("[\n")
-      .append("{'sType':'numeric', 'aTargets': [0]")
-      .append(", 'mRender': parseHadoopID }")
-
-      .append("\n, {'sType':'numeric', 'aTargets': [6, 7]")
-      .append(", 'mRender': renderHadoopDate }")
-
-      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
-      .append(", 'mRender': parseHadoopProgress }]").toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
index c0e6834..4189053 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.RMHAUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
@@ -53,6 +54,7 @@ public class RMWebApp extends WebApp implements YarnWebParams {
 
     if (rm != null) {
       bind(ResourceManager.class).toInstance(rm);
+      bind(ApplicationBaseProtocol.class).toInstance(rm.getClientRMService());
     }
     route("/", RmController.class);
     route(pajoin("/nodes", NODE_STATE), RmController.class, "nodes");
@@ -62,6 +64,9 @@ public class RMWebApp extends WebApp implements YarnWebParams {
     route("/scheduler", RmController.class, "scheduler");
     route(pajoin("/queue", QUEUE_NAME), RmController.class, "queue");
     route("/nodelabels", RmController.class, "nodelabels");
+    route(pajoin("/appattempt", APPLICATION_ATTEMPT_ID), RmController.class,
+      "appattempt");
+    route(pajoin("/container", CONTAINER_ID), RmController.class, "container");
   }
 
   @Override
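
The extra binding above is what lets the RM reuse the shared web UI blocks: AppsBlock, AppBlock and ContainerBlock from org.apache.hadoop.yarn.server.webapp are backed by ApplicationBaseProtocol, and each web app supplies its own implementation of that interface; the RM now hands in its ClientRMService (the tests in this patch add the same binding). A rough sketch of that binding written as a stand-alone Guice module follows; the module class itself is hypothetical, since RMWebApp does the binding inline in setup():

import com.google.inject.AbstractModule;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;

// Hypothetical module mirroring the inline bindings added to RMWebApp#setup().
public class RmWebBindingSketch extends AbstractModule {
  private final ResourceManager rm;

  public RmWebBindingSketch(ResourceManager rm) {
    this.rm = rm;
  }

  @Override
  protected void configure() {
    // The shared blocks are injected with ApplicationBaseProtocol, so the RM
    // binds it to its ClientRMService; other web UIs bind their own backends.
    bind(ResourceManager.class).toInstance(rm);
    bind(ApplicationBaseProtocol.class).toInstance(rm.getClientRMService());
  }
}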

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
index 972432b..cc5232e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
@@ -54,6 +54,14 @@ public class RmController extends Controller {
     render(AppPage.class);
   }
 
+  public void appattempt() {
+    render(AppAttemptPage.class);
+  }
+
+  public void container() {
+    render(ContainerPage.class);
+  }
+
   public void nodes() {
     render(NodesPage.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
index 769c4da..1a437f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
 
@@ -35,7 +36,7 @@ public class RmView extends TwoColumnLayout {
   protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "apps");
-    set(initID(DATATABLES, "apps"), appsTableInit());
+    set(initID(DATATABLES, "apps"), initAppsTable());
     setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
 
     // Set the correct title.
@@ -59,31 +60,7 @@ public class RmView extends TwoColumnLayout {
     return AppsBlockWithMetrics.class;
   }
 
-  private String appsTableInit() {
-    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
-    return tableInit()
-      .append(", 'aaData': appsTableData")
-      .append(", bDeferRender: true")
-      .append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ")
-      .append(getAppsTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-  
-  protected String getAppsTableColumnDefs() {
-    StringBuilder sb = new StringBuilder();
-    return sb
-      .append("[\n")
-      .append("{'sType':'string', 'aTargets': [0]")
-      .append(", 'mRender': parseHadoopID }")
-
-      .append("\n, {'sType':'numeric', 'aTargets': [5, 6]")
-      .append(", 'mRender': renderHadoopDate }")
-
-      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
-      .append(", 'mRender': parseHadoopProgress }]").toString();
+  protected String initAppsTable() {
+    return WebPageUtils.appsTableInit();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
index 9732c19..8c7b14d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -32,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.webapp.AppBlock;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
 import org.junit.Test;
@@ -75,8 +77,10 @@ public class TestAppPage {
               @Override
               public void configure(Binder binder) {
                 try {
-                  binder.bind(ResourceManager.class).toInstance(
-                      TestRMWebApp.mockRm(rmContext));
+                  ResourceManager rm = TestRMWebApp.mockRm(rmContext);
+                  binder.bind(ResourceManager.class).toInstance(rm);
+                  binder.bind(ApplicationBaseProtocol.class).toInstance(
+                    rm.getClientRMService());
                 } catch (IOException e) {
                   throw new IllegalStateException(e);
                 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index fb1e61d..481a53b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -21,19 +21,30 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 import static org.apache.hadoop.yarn.server.resourcemanager.MockNodes.newResource;
 import static org.apache.hadoop.yarn.webapp.Params.TITLE;
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
@@ -54,6 +65,7 @@ import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Assert;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
@@ -87,7 +99,10 @@ public class TestRMWebApp {
       @Override
       public void configure(Binder binder) {
         try {
-          binder.bind(ResourceManager.class).toInstance(mockRm(3, 1, 2, 8*GiB));
+          ResourceManager mockRm = mockRm(3, 1, 2, 8*GiB);
+          binder.bind(ResourceManager.class).toInstance(mockRm);
+          binder.bind(ApplicationBaseProtocol.class)
+              .toInstance(mockRm.getClientRMService());
         } catch (IOException e) {
           throw new IllegalStateException(e);
         }
@@ -194,9 +209,11 @@ public class TestRMWebApp {
     ResourceManager rm = mock(ResourceManager.class);
     ResourceScheduler rs = mockCapacityScheduler();
     ApplicationACLsManager aclMgr = mockAppACLsManager();
+    ClientRMService clientRMService = mockClientRMService(rmContext);
     when(rm.getResourceScheduler()).thenReturn(rs);
     when(rm.getRMContext()).thenReturn(rmContext);
     when(rm.getApplicationACLsManager()).thenReturn(aclMgr);
+    when(rm.getClientRMService()).thenReturn(clientRMService);
     return rm;
   }
 
@@ -222,6 +239,35 @@ public class TestRMWebApp {
     return new ApplicationACLsManager(conf);
   }
 
+  public static ClientRMService mockClientRMService(RMContext rmContext) {
+    ClientRMService clientRMService = mock(ClientRMService.class);
+    List<ApplicationReport> appReports = new ArrayList<ApplicationReport>();
+    for (RMApp app : rmContext.getRMApps().values()) {
+      ApplicationReport appReport =
+          ApplicationReport.newInstance(
+              app.getApplicationId(), (ApplicationAttemptId) null,
+              app.getUser(), app.getQueue(),
+              app.getName(), (String) null, 0, (Token) null,
+              app.createApplicationState(),
+              app.getDiagnostics().toString(), (String) null,
+              app.getStartTime(), app.getFinishTime(),
+              app.getFinalApplicationStatus(),
+              (ApplicationResourceUsageReport) null, app.getTrackingUrl(),
+              app.getProgress(), app.getApplicationType(), (Token) null);
+      appReports.add(appReport);
+    }
+    GetApplicationsResponse response = mock(GetApplicationsResponse.class);
+    when(response.getApplicationList()).thenReturn(appReports);
+    try {
+      when(clientRMService.getApplications(any(GetApplicationsRequest.class)))
+          .thenReturn(response);
+    } catch (YarnException e) {
+      Assert.fail("Exception is not expected.");
+    }
+    return clientRMService;
+  }
+
+
   static void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
     // Define top-level queues
     conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
index b850a5e..06fa0d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
@@ -24,10 +24,12 @@ import com.google.inject.Injector;
 import com.google.inject.Module;
 
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
@@ -73,7 +75,8 @@ public class TestRMWebAppFairScheduler {
                   mockRm(rmContext);
               binder.bind(ResourceManager.class).toInstance
                   (mockRmWithFairScheduler);
-
+              binder.bind(ApplicationBaseProtocol.class).toInstance(
+                mockRmWithFairScheduler.getClientRMService());
             } catch (IOException e) {
               throw new IllegalStateException(e);
             }
@@ -112,6 +115,8 @@ public class TestRMWebAppFairScheduler {
                   mockRmWithApps(rmContext);
               binder.bind(ResourceManager.class).toInstance
                   (mockRmWithFairScheduler);
+              binder.bind(ApplicationBaseProtocol.class).toInstance(
+                  mockRmWithFairScheduler.getClientRMService());
 
             } catch (IOException e) {
               throw new IllegalStateException(e);
@@ -168,8 +173,10 @@ public class TestRMWebAppFairScheduler {
       IOException {
     ResourceManager rm = mock(ResourceManager.class);
     ResourceScheduler rs = mockFairScheduler();
+    ClientRMService clientRMService = mockClientRMService(rmContext);
     when(rm.getResourceScheduler()).thenReturn(rs);
     when(rm.getRMContext()).thenReturn(rmContext);
+    when(rm.getClientRMService()).thenReturn(clientRMService);
     return rm;
   }
 
@@ -188,8 +195,10 @@ public class TestRMWebAppFairScheduler {
       IOException {
     ResourceManager rm = mock(ResourceManager.class);
     ResourceScheduler rs =  mockFairSchedulerWithoutApps(rmContext);
+    ClientRMService clientRMService = mockClientRMService(rmContext);
     when(rm.getResourceScheduler()).thenReturn(rs);
     when(rm.getRMContext()).thenReturn(rmContext);
+    when(rm.getClientRMService()).thenReturn(clientRMService);
     return rm;
   }
 
@@ -213,4 +222,7 @@ public class TestRMWebAppFairScheduler {
     return fs;
   }
 
+  public static ClientRMService mockClientRMService(RMContext rmContext) {
+    return mock(ClientRMService.class);
+  }
 }


[47/49] hadoop git commit: YARN-3154. Added additional APIs in LogAggregationContext to avoid aggregating running logs of application when rolling is enabled. Contributed by Xuan Gong.

Posted by zj...@apache.org.
YARN-3154. Added additional APIs in LogAggregationContext to avoid aggregating running logs of application when rolling is enabled. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/863079bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/863079bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/863079bb

Branch: refs/heads/YARN-2928
Commit: 863079bb874ba77918ca1c0741eae10e245995c8
Parents: b49c3a1
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Thu Mar 12 13:32:29 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Thu Mar 12 13:32:29 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../yarn/api/records/LogAggregationContext.java | 79 ++++++++++++++++++--
 .../src/main/proto/yarn_protos.proto            |  2 +
 .../impl/pb/LogAggregationContextPBImpl.java    | 39 ++++++++++
 .../logaggregation/AggregatedLogFormat.java     | 39 +++++-----
 .../logaggregation/AppLogAggregatorImpl.java    | 22 ++++--
 .../TestContainerManagerRecovery.java           | 10 ++-
 .../TestLogAggregationService.java              | 73 +++++++++++++++---
 .../capacity/TestContainerAllocation.java       |  8 +-
 9 files changed, 227 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
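
The new four-argument LogAggregationContext factory added below can be wired into an application submission roughly as follows. This is a minimal sketch: the pattern values are illustrative only, and the ApplicationSubmissionContext hookup is the usual one rather than something introduced by this patch.

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;

public class RollingLogAggregationSetup {
  // The first two patterns apply to whatever log files exist when the
  // application finishes; the rolled-logs patterns apply to each rolling
  // upload cycle while the application is still running.
  static void configure(ApplicationSubmissionContext submissionContext) {
    LogAggregationContext logCtx = LogAggregationContext.newInstance(
        ".*",        // includePattern (at application finish)
        "stdout",    // excludePattern (at application finish)
        "syslog.*",  // rolledLogsIncludePattern (while the app is running)
        "stderr");   // rolledLogsExcludePattern (while the app is running)
    submissionContext.setLogAggregationContext(logCtx);
  }
}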


http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 11d1cc9..80d2697 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -757,6 +757,9 @@ Release 2.7.0 - UNRELEASED
 
     YARN-3338. Exclude jline dependency from YARN. (Zhijie Shen via xgong)
 
+    YARN-3154. Added additional APIs in LogAggregationContext to avoid aggregating
+    running logs of application when rolling is enabled. (Xuan Gong via vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
index 46c1809..e582d2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
@@ -32,11 +32,20 @@ import org.apache.hadoop.yarn.util.Records;
  *   <ul>
  *     <li>includePattern. It uses Java Regex to filter the log files
  *     which match the defined include pattern and those log files
- *     will be uploaded. </li>
+ *     will be uploaded when the application finishes. </li>
  *     <li>excludePattern. It uses Java Regex to filter the log files
  *     which match the defined exclude pattern and those log files
- *     will not be uploaded. If the log file name matches both the
- *     include and the exclude pattern, this file will be excluded eventually</li>
+ *     will not be uploaded when the application finishes. If the log file
+ *     name matches both the include and the exclude pattern, this file
+ *     will be excluded eventually</li>
+ *     <li>rolledLogsIncludePattern. It uses Java Regex to filter the log files
+ *     which match the defined include pattern and those log files
+ *     will be aggregated in a rolling fashion.</li>
+ *     <li>rolledLogsExcludePattern. It uses Java Regex to filter the log files
+ *     which match the defined exclude pattern and those log files
+ *     will not be aggregated in a rolling fashion. If the log file
+ *     name matches both the include and the exclude pattern, this file
+ *     will be excluded eventually</li>
  *   </ul>
  * </p>
  *
@@ -57,8 +66,23 @@ public abstract class LogAggregationContext {
     return context;
   }
 
+  @Public
+  @Unstable
+  public static LogAggregationContext newInstance(String includePattern,
+      String excludePattern, String rolledLogsIncludePattern,
+      String rolledLogsExcludePattern) {
+    LogAggregationContext context =
+        Records.newRecord(LogAggregationContext.class);
+    context.setIncludePattern(includePattern);
+    context.setExcludePattern(excludePattern);
+    context.setRolledLogsIncludePattern(rolledLogsIncludePattern);
+    context.setRolledLogsExcludePattern(rolledLogsExcludePattern);
+    return context;
+  }
+
   /**
-   * Get include pattern
+   * Get include pattern. This includePattern only takes effect
+   * on logs that exist at the time of application finish.
    *
    * @return include pattern
    */
@@ -67,7 +91,8 @@ public abstract class LogAggregationContext {
   public abstract String getIncludePattern();
 
   /**
-   * Set include pattern
+   * Set include pattern. This includePattern only takes effect
+   * on logs that exist at the time of application finish.
    *
    * @param includePattern
    */
@@ -76,7 +101,8 @@ public abstract class LogAggregationContext {
   public abstract void setIncludePattern(String includePattern);
 
   /**
-   * Get exclude pattern
+   * Get exclude pattern. This excludePattern only takes effect
+   * on logs that exist at the time of application finish.
    *
    * @return exclude pattern
    */
@@ -85,11 +111,50 @@ public abstract class LogAggregationContext {
   public abstract String getExcludePattern();
 
   /**
-   * Set exclude pattern
+   * Set exclude pattern. This excludePattern only takes effect
+   * on logs that exist at the time of application finish.
    *
    * @param excludePattern
    */
   @Public
   @Unstable
   public abstract void setExcludePattern(String excludePattern);
+
+  /**
+   * Get include pattern in a rolling fashion.
+   * 
+   * @return include pattern
+   */
+  @Public
+  @Unstable
+  public abstract String getRolledLogsIncludePattern();
+
+  /**
+   * Set include pattern in a rolling fashion.
+   * 
+   * @param rolledLogsIncludePattern
+   */
+  @Public
+  @Unstable
+  public abstract void setRolledLogsIncludePattern(
+      String rolledLogsIncludePattern);
+
+  /**
+   * Get exclude pattern for aggregation in a rolling fashion.
+   * 
+   * @return exclude pattern
+   */
+  @Public
+  @Unstable
+  public abstract String getRolledLogsExcludePattern();
+
+  /**
+   * Set exclude pattern for aggregation in a rolling fashion.
+   * 
+   * @param rolledLogsExcludePattern
+   */
+  @Public
+  @Unstable
+  public abstract void setRolledLogsExcludePattern(
+      String rolledLogsExcludePattern);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 90706ed..2edff99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -314,6 +314,8 @@ message ApplicationSubmissionContextProto {
 message LogAggregationContextProto {
  optional string include_pattern = 1 [default = ".*"];
  optional string exclude_pattern = 2 [default = ""];
+ optional string rolled_logs_include_pattern = 3 [default = ""];
+ optional string rolled_logs_exclude_pattern = 4 [default = ".*"];
 }
 
 enum ApplicationAccessTypeProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LogAggregationContextPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LogAggregationContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LogAggregationContextPBImpl.java
index dc7a21d..f6409bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LogAggregationContextPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LogAggregationContextPBImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.api.records.impl.pb;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationContextProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationContextProtoOrBuilder;
+
 import com.google.protobuf.TextFormat;
 
 public class LogAggregationContextPBImpl extends LogAggregationContext{
@@ -116,4 +117,42 @@ public class LogAggregationContextPBImpl extends LogAggregationContext{
     }
     builder.setExcludePattern(excludePattern);
   }
+
+  @Override
+  public String getRolledLogsIncludePattern() {
+    LogAggregationContextProtoOrBuilder p = viaProto ? proto : builder;
+    if (! p.hasRolledLogsIncludePattern()) {
+      return null;
+    }
+    return p.getRolledLogsIncludePattern();
+  }
+
+  @Override
+  public void setRolledLogsIncludePattern(String rolledLogsIncludePattern) {
+    maybeInitBuilder();
+    if (rolledLogsIncludePattern == null) {
+      builder.clearRolledLogsIncludePattern();
+      return;
+    }
+    builder.setRolledLogsIncludePattern(rolledLogsIncludePattern);
+  }
+
+  @Override
+  public String getRolledLogsExcludePattern() {
+    LogAggregationContextProtoOrBuilder p = viaProto ? proto : builder;
+    if (! p.hasRolledLogsExcludePattern()) {
+      return null;
+    }
+    return p.getRolledLogsExcludePattern();
+  }
+
+  @Override
+  public void setRolledLogsExcludePattern(String rolledLogsExcludePattern) {
+    maybeInitBuilder();
+    if (rolledLogsExcludePattern == null) {
+      builder.clearRolledLogsExcludePattern();
+      return;
+    }
+    builder.setRolledLogsExcludePattern(rolledLogsExcludePattern);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index b669332..29122dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -167,17 +167,18 @@ public class AggregatedLogFormat {
     private Set<File> uploadedFiles = new HashSet<File>();
     private final Set<String> alreadyUploadedLogFiles;
     private Set<String> allExistingFileMeta = new HashSet<String>();
+    private final boolean appFinished;
     // TODO Maybe add a version string here. Instead of changing the version of
     // the entire k-v format
 
     public LogValue(List<String> rootLogDirs, ContainerId containerId,
         String user) {
-      this(rootLogDirs, containerId, user, null, new HashSet<String>());
+      this(rootLogDirs, containerId, user, null, new HashSet<String>(), true);
     }
 
     public LogValue(List<String> rootLogDirs, ContainerId containerId,
         String user, LogAggregationContext logAggregationContext,
-        Set<String> alreadyUploadedLogFiles) {
+        Set<String> alreadyUploadedLogFiles, boolean appFinished) {
       this.rootLogDirs = new ArrayList<String>(rootLogDirs);
       this.containerId = containerId;
       this.user = user;
@@ -186,6 +187,7 @@ public class AggregatedLogFormat {
       Collections.sort(this.rootLogDirs);
       this.logAggregationContext = logAggregationContext;
       this.alreadyUploadedLogFiles = alreadyUploadedLogFiles;
+      this.appFinished = appFinished;
     }
 
     private Set<File> getPendingLogFilesToUploadForThisContainer() {
@@ -296,17 +298,15 @@ public class AggregatedLogFormat {
       }
 
       if (this.logAggregationContext != null && candidates.size() > 0) {
-        if (this.logAggregationContext.getIncludePattern() != null
-            && !this.logAggregationContext.getIncludePattern().isEmpty()) {
-          filterFiles(this.logAggregationContext.getIncludePattern(),
-              candidates, false);
-        }
+        filterFiles(
+          this.appFinished ? this.logAggregationContext.getIncludePattern()
+              : this.logAggregationContext.getRolledLogsIncludePattern(),
+          candidates, false);
 
-        if (this.logAggregationContext.getExcludePattern() != null
-            && !this.logAggregationContext.getExcludePattern().isEmpty()) {
-          filterFiles(this.logAggregationContext.getExcludePattern(),
-              candidates, true);
-        }
+        filterFiles(
+          this.appFinished ? this.logAggregationContext.getExcludePattern()
+              : this.logAggregationContext.getRolledLogsExcludePattern(),
+          candidates, true);
 
         Iterable<File> mask =
             Iterables.filter(candidates, new Predicate<File>() {
@@ -323,14 +323,15 @@ public class AggregatedLogFormat {
 
     private void filterFiles(String pattern, Set<File> candidates,
         boolean exclusion) {
-      Pattern filterPattern =
-          Pattern.compile(pattern);
-      for (Iterator<File> candidatesItr = candidates.iterator(); candidatesItr
+      if (pattern != null && !pattern.isEmpty()) {
+        Pattern filterPattern = Pattern.compile(pattern);
+        for (Iterator<File> candidatesItr = candidates.iterator(); candidatesItr
           .hasNext();) {
-        File candidate = candidatesItr.next();
-        boolean match = filterPattern.matcher(candidate.getName()).find();
-        if ((!match && !exclusion) || (match && exclusion)) {
-          candidatesItr.remove();
+          File candidate = candidatesItr.next();
+          boolean match = filterPattern.matcher(candidate.getName()).find();
+          if ((!match && !exclusion) || (match && exclusion)) {
+            candidatesItr.remove();
+          }
         }
       }
     }
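
To make the filtering change above concrete, here is a simplified stand-alone rendition of the patched filterFiles; it operates on plain file names instead of java.io.File, purely for illustration, but keeps the same guard and match semantics:

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.regex.Pattern;

public class LogFileFilterSketch {
  // A null or empty pattern is now a no-op inside the filter itself; otherwise
  // candidates are kept or removed based on a partial regex match (find()).
  static void filter(String pattern, Set<String> candidates, boolean exclusion) {
    if (pattern == null || pattern.isEmpty()) {
      return;
    }
    Pattern filterPattern = Pattern.compile(pattern);
    for (Iterator<String> it = candidates.iterator(); it.hasNext();) {
      boolean match = filterPattern.matcher(it.next()).find();
      if ((!match && !exclusion) || (match && exclusion)) {
        it.remove();
      }
    }
  }

  public static void main(String[] args) {
    Set<String> files = new HashSet<String>();
    files.add("syslog.1");
    files.add("stderr");
    filter("syslog.*", files, false); // include pass keeps only matches
    filter("stderr", files, true);    // exclude pass drops matches
    System.out.println(files);        // prints [syslog.1]
  }
}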

http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 787422b..ff70a68 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -116,6 +115,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
   private final Context context;
   private final int retentionSize;
   private final long rollingMonitorInterval;
+  private final boolean logAggregationInRolling;
   private final NodeId nodeId;
   // This variable is only for testing
   private final AtomicBoolean waiting = new AtomicBoolean(false);
@@ -193,9 +193,14 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       }
       this.rollingMonitorInterval = configuredRollingMonitorInterval;
     }
+    this.logAggregationInRolling =
+        this.rollingMonitorInterval <= 0 || this.logAggregationContext == null
+            || this.logAggregationContext.getRolledLogsIncludePattern() == null
+            || this.logAggregationContext.getRolledLogsIncludePattern()
+              .isEmpty() ? false : true;
   }
 
-  private void uploadLogsForContainers() {
+  private void uploadLogsForContainers(boolean appFinished) {
     if (this.logAggregationDisabled) {
       return;
     }
@@ -262,7 +267,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
           containerLogAggregators.put(container, aggregator);
         }
         Set<Path> uploadedFilePathsInThisCycle =
-            aggregator.doContainerLogAggregation(writer);
+            aggregator.doContainerLogAggregation(writer, appFinished);
         if (uploadedFilePathsInThisCycle.size() > 0) {
           uploadedLogsInThisCycle = true;
         }
@@ -394,12 +399,12 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       synchronized(this) {
         try {
           waiting.set(true);
-          if (this.rollingMonitorInterval > 0) {
+          if (logAggregationInRolling) {
             wait(this.rollingMonitorInterval * 1000);
             if (this.appFinishing.get() || this.aborted.get()) {
               break;
             }
-            uploadLogsForContainers();
+            uploadLogsForContainers(false);
           } else {
             wait(THREAD_SLEEP_TIME);
           }
@@ -415,7 +420,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     }
 
     // App is finished, upload the container logs.
-    uploadLogsForContainers();
+    uploadLogsForContainers(true);
 
     // Remove the local app-log-dirs
     List<Path> localAppLogDirs = new ArrayList<Path>();
@@ -536,7 +541,8 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       this.containerId = containerId;
     }
 
-    public Set<Path> doContainerLogAggregation(LogWriter writer) {
+    public Set<Path> doContainerLogAggregation(LogWriter writer,
+        boolean appFinished) {
       LOG.info("Uploading logs for container " + containerId
           + ". Current good log dirs are "
           + StringUtils.join(",", dirsHandler.getLogDirs()));
@@ -544,7 +550,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       final LogValue logValue =
           new LogValue(dirsHandler.getLogDirs(), containerId,
             userUgi.getShortUserName(), logAggregationContext,
-            this.uploadedFileMeta);
+            this.uploadedFileMeta, appFinished);
       try {
         writer.append(logKey, logValue);
       } catch (Exception e) {
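
Condensed from the constructor change above, rolling uploads are now gated on more than a positive monitor interval; a stand-alone sketch of the equivalent condition (names follow the diff, the surrounding aggregator class is omitted) is:

import org.apache.hadoop.yarn.api.records.LogAggregationContext;

public class RollingDecisionSketch {
  // Rolling-cycle uploads run only when the interval is positive and a
  // non-empty rolledLogsIncludePattern was supplied; otherwise the aggregator
  // falls back to a single upload when the application finishes.
  static boolean logAggregationInRolling(long rollingMonitorInterval,
      LogAggregationContext context) {
    return rollingMonitorInterval > 0
        && context != null
        && context.getRolledLogsIncludePattern() != null
        && !context.getRolledLogsIncludePattern().isEmpty();
  }
}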

http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index a73d583..c45ffbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -130,8 +130,10 @@ public class TestContainerManagerRecovery {
         containerTokens, acls);
     // create the logAggregationContext
     LogAggregationContext logAggregationContext =
-        LogAggregationContext.newInstance("includePattern", "excludePattern");
-    StartContainersResponse startResponse = startContainer(context, cm, cid,
+        LogAggregationContext.newInstance("includePattern", "excludePattern",
+          "includePatternInRollingAggregation",
+          "excludePatternInRollingAggregation");
+    StartContainersResponse startResponse = startContainer(context, cm, cid,
         clc, logAggregationContext);
     assertTrue(startResponse.getFailedRequests().isEmpty());
     assertEquals(1, context.getApplications().size());
@@ -171,6 +173,10 @@ public class TestContainerManagerRecovery {
       recovered.getIncludePattern());
     assertEquals(logAggregationContext.getExcludePattern(),
       recovered.getExcludePattern());
+    assertEquals(logAggregationContext.getRolledLogsIncludePattern(),
+      recovered.getRolledLogsIncludePattern());
+    assertEquals(logAggregationContext.getRolledLogsExcludePattern(),
+      recovered.getRolledLogsExcludePattern());
 
     waitForAppState(app, ApplicationState.INITING);
     assertTrue(context.getApplicationACLsManager().checkAccess(

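As the updated test shows, LogAggregationContext.newInstance now takes the rolled-logs
include and exclude patterns after the original pair, and both survive NM recovery. A
hedged usage sketch, not taken from the patch, of how a client could request that
behaviour when submitting an application (the setter on ApplicationSubmissionContext is
assumed to be available on this branch; the patterns are purely illustrative):

    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.api.records.LogAggregationContext;

    final class RollingLogContextExample {
      static void configure(ApplicationSubmissionContext submissionContext) {
        LogAggregationContext logCtx = LogAggregationContext.newInstance(
            ".*",          // includePattern: everything at final aggregation
            "",            // excludePattern: exclude nothing at final aggregation
            ".*",          // rolledLogsIncludePattern: everything during rolling cycles
            "std_final");  // rolledLogsExcludePattern: hold std_final until the app ends
        submissionContext.setLogAggregationContext(logCtx);
      }
    }
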
http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 901e45a..df51a0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -698,7 +698,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     }
   }
 
-  private String verifyContainerLogs(LogAggregationService logAggregationService,
+  private LogFileStatusInLastCycle verifyContainerLogs(LogAggregationService logAggregationService,
       ApplicationId appId, ContainerId[] expectedContainerIds,
       String[] logFiles, int numOfContainerLogs, boolean multiLogs)
       throws IOException {
@@ -743,7 +743,9 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         new AggregatedLogFormat.LogReader(this.conf, targetNodeFile.getPath());
     Assert.assertEquals(this.user, reader.getApplicationOwner());
     verifyAcls(reader.getApplicationAcls());
-    
+
+    List<String> fileTypes = new ArrayList<String>();
+
     try {
       Map<String, Map<String, String>> logMap =
           new HashMap<String, Map<String, String>>();
@@ -769,6 +771,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
             Assert.assertEquals("LogType:", writtenLines[0].substring(0, 8));
             String fileType = writtenLines[0].substring(8);
+            fileTypes.add(fileType);
 
             Assert.assertEquals("LogLength:", writtenLines[1].substring(0, 10));
             String fileLengthStr = writtenLines[1].substring(10);
@@ -811,7 +814,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         Assert.assertEquals(0, thisContainerMap.size());
       }
       Assert.assertEquals(0, logMap.size());
-      return targetNodeFile.getPath().getName();
+      return new LogFileStatusInLastCycle(targetNodeFile.getPath().getName(), fileTypes);
     } finally {
       reader.close();
     }
@@ -1289,6 +1292,12 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
       throws Exception {
     LogAggregationContext logAggregationContextWithInterval =
         Records.newRecord(LogAggregationContext.class);
+    // Set the rolled-logs include/exclude patterns.
+    // We expect every log except std_final to be uploaded while the
+    // app is running; std_final itself is uploaded only when the
+    // app finishes.
+    logAggregationContextWithInterval.setRolledLogsIncludePattern(".*");
+    logAggregationContextWithInterval.setRolledLogsExcludePattern("std_final");
     this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
     this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
       this.remoteRootLogDir.getAbsolutePath());
@@ -1338,9 +1347,14 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
       this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls,
       logAggregationContextWithInterval));
 
+    LogFileStatusInLastCycle logFileStatusInLastCycle = null;
     // Simulate log-file creation
-    String[] logFiles1 = new String[] { "stdout", "stderr", "syslog" };
-    writeContainerLogs(appLogDir, container, logFiles1);
+    // Create std_final in the log directory; it will not be aggregated
+    // until the app finishes.
+    String[] logFiles1WithFinalLog =
+        new String[] { "stdout", "stderr", "syslog", "std_final" };
+    String[] logFiles1 = new String[] { "stdout", "stderr", "syslog"};
+    writeContainerLogs(appLogDir, container, logFiles1WithFinalLog);
 
     // Do log aggregation
     AppLogAggregatorImpl aggregator =
@@ -1355,10 +1369,16 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
       Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application,
         50, 1, false, null));
     }
-    String logFileInLastCycle = null;
     // Container logs should be uploaded
-    logFileInLastCycle = verifyContainerLogs(logAggregationService, application,
+    logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application,
         new ContainerId[] { container }, logFiles1, 3, true);
+    for(String logFile : logFiles1) {
+      Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle()
+        .contains(logFile));
+    }
+    // Make sure the std_final is not uploaded.
+    Assert.assertFalse(logFileStatusInLastCycle.getLogFileTypesInLastCycle()
+      .contains("std_final"));
 
     Thread.sleep(2000);
 
@@ -1380,15 +1400,23 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
     if (retentionSizeLimitation) {
       Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application,
-        50, 1, true, logFileInLastCycle));
+        50, 1, true, logFileStatusInLastCycle.getLogFilePathInLastCycle()));
     } else {
       Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application,
         50, 2, false, null));
     }
     // Container logs should be uploaded
-    logFileInLastCycle = verifyContainerLogs(logAggregationService, application,
+    logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application,
         new ContainerId[] { container }, logFiles2, 3, true);
 
+    for(String logFile : logFiles2) {
+      Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle()
+        .contains(logFile));
+    }
+    // Make sure the std_final is not uploaded.
+    Assert.assertFalse(logFileStatusInLastCycle.getLogFileTypesInLastCycle()
+      .contains("std_final"));
+
     Thread.sleep(2000);
 
     // create another logs
@@ -1402,13 +1430,17 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     logAggregationService.handle(new LogHandlerAppFinishedEvent(application));
     if (retentionSizeLimitation) {
       Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application,
-        50, 1, true, logFileInLastCycle));
+        50, 1, true, logFileStatusInLastCycle.getLogFilePathInLastCycle()));
     } else {
       Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application,
         50, 3, false, null));
     }
+
+    // The app is finished, so the log "std_final" should be aggregated this time.
+    String[] logFiles3WithFinalLog =
+        new String[] { "stdout_2", "stderr_2", "syslog_2", "std_final" };
     verifyContainerLogs(logAggregationService, application,
-      new ContainerId[] { container }, logFiles3, 3, true);
+      new ContainerId[] { container }, logFiles3WithFinalLog, 4, true);
     logAggregationService.stop();
     assertEquals(0, logAggregationService.getNumAggregators());
   }
@@ -1512,4 +1544,23 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     return numOfLogsAvailable(logAggregationService, application, sizeLimited,
       lastLogFile) == expectNum;
   }
+
+  private static class LogFileStatusInLastCycle {
+    private String logFilePathInLastCycle;
+    private List<String> logFileTypesInLastCycle;
+
+    public LogFileStatusInLastCycle(String logFilePathInLastCycle,
+        List<String> logFileTypesInLastCycle) {
+      this.logFilePathInLastCycle = logFilePathInLastCycle;
+      this.logFileTypesInLastCycle = logFileTypesInLastCycle;
+    }
+
+    public String getLogFilePathInLastCycle() {
+      return this.logFilePathInLastCycle;
+    }
+
+    public List<String> getLogFileTypesInLastCycle() {
+      return this.logFileTypesInLastCycle;
+    }
+  }
 }
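
The patterns chosen at the top of this test are ordinary Java regular expressions
applied to log file names. A standalone illustration, not the aggregator's own code, of
why std_final is the only file deferred to the final cycle; whole-name matching is
assumed here, while the real filtering lives in AggregatedLogFormat.LogValue:

    import java.util.regex.Pattern;

    public class RolledPatternDemo {
      public static void main(String[] args) {
        Pattern include = Pattern.compile(".*");        // rolledLogsIncludePattern
        Pattern exclude = Pattern.compile("std_final"); // rolledLogsExcludePattern
        for (String name : new String[] {"stdout", "stderr", "syslog", "std_final"}) {
          boolean uploadedWhileRunning =
              include.matcher(name).matches() && !exclude.matcher(name).matches();
          System.out.println(name + " -> uploaded in rolling cycle: "
              + uploadedWhileRunning);
        }
      }
    }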

http://git-wip-us.apache.org/repos/asf/hadoop/blob/863079bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index 169517d..0ad2957 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -230,12 +230,18 @@ public class TestContainerAllocation {
     // create a not-null LogAggregationContext
     LogAggregationContext logAggregationContext =
         LogAggregationContext.newInstance(
-          "includePattern", "excludePattern");
+          "includePattern", "excludePattern",
+          "rolledLogsIncludePattern",
+          "rolledLogsExcludePattern");
     LogAggregationContext returned =
         getLogAggregationContextFromContainerToken(rm1, nm2,
           logAggregationContext);
     Assert.assertEquals("includePattern", returned.getIncludePattern());
     Assert.assertEquals("excludePattern", returned.getExcludePattern());
+    Assert.assertEquals("rolledLogsIncludePattern",
+      returned.getRolledLogsIncludePattern());
+    Assert.assertEquals("rolledLogsExcludePattern",
+      returned.getRolledLogsExcludePattern());
     rm1.stop();
   }
 


[37/49] hadoop git commit: HDFS-7830. DataNode does not release the volume lock when adding a volume fails. (Lei Xu via Colin P. McCabe)

Posted by zj...@apache.org.
HDFS-7830. DataNode does not release the volume lock when adding a volume fails. (Lei Xu via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c1036d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c1036d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c1036d5

Branch: refs/heads/YARN-2928
Commit: 5c1036d598051cf6af595740f1ab82092b0b6554
Parents: a5cf985
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Mar 10 18:20:25 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue Mar 10 18:20:58 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hadoop/hdfs/server/common/Storage.java      |  2 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 +++++++-
 .../datanode/TestDataNodeHotSwapVolumes.java    | 34 ++--------------
 .../fsdataset/impl/FsDatasetTestUtil.java       | 43 ++++++++++++++++++++
 .../fsdataset/impl/TestFsDatasetImpl.java       | 43 ++++++++++++++++++++
 6 files changed, 108 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1036d5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a2e552a..0ba34a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1133,6 +1133,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7818. OffsetParam should return the default value instead of throwing
     NPE when the value is unspecified. (Eric Payne via wheat9)
 
+    HDFS-7830. DataNode does not release the volume lock when adding a volume
+    fails. (Lei Xu via Colin P. Mccabe)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1036d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index e6bd5b2..e6f0999 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -672,7 +672,7 @@ public abstract class Storage extends StorageInfo {
      */
     public void lock() throws IOException {
       if (isShared()) {
-        LOG.info("Locking is disabled");
+        LOG.info("Locking is disabled for " + this.root);
         return;
       }
       FileLock newLock = tryLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1036d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 58f5615..0f28aa4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -46,6 +46,7 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -375,6 +376,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
   }
 
+  @VisibleForTesting
+  public FsVolumeImpl createFsVolume(String storageUuid, File currentDir,
+      StorageType storageType) throws IOException {
+    return new FsVolumeImpl(this, storageUuid, currentDir, conf, storageType);
+  }
+
   @Override
   public void addVolume(final StorageLocation location,
       final List<NamespaceInfo> nsInfos)
@@ -394,8 +401,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     final Storage.StorageDirectory sd = builder.getStorageDirectory();
 
     StorageType storageType = location.getStorageType();
-    final FsVolumeImpl fsVolume = new FsVolumeImpl(
-        this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
+    final FsVolumeImpl fsVolume =
+        createFsVolume(sd.getStorageUuid(), sd.getCurrentDir(), storageType);
     final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
     ArrayList<IOException> exceptions = Lists.newArrayList();
 
@@ -411,6 +418,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       }
     }
     if (!exceptions.isEmpty()) {
+      try {
+        sd.unlock();
+      } catch (IOException e) {
+        exceptions.add(e);
+      }
       throw MultipleIOException.createIOException(exceptions);
     }
 

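The heart of the fix is visible in this hunk: when activating the new volume fails, the
storage directory's in_use.lock is released before the collected errors are rethrown,
and a failure of the unlock itself is folded into the same MultipleIOException rather
than masking the original causes. A hedged sketch of that failure path pulled out as a
helper (the class and method names here are made up; only the calls mirror the diff):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.common.Storage;
    import org.apache.hadoop.io.MultipleIOException;

    final class AddVolumeFailureSketch {
      // Callers only reach this when 'exceptions' is non-empty.
      static void unlockAndRethrow(Storage.StorageDirectory sd,
          List<IOException> exceptions) throws IOException {
        try {
          sd.unlock();                // give the volume lock back on failure
        } catch (IOException e) {
          exceptions.add(e);          // keep the original errors visible too
        }
        throw MultipleIOException.createIOException(exceptions);
      }
    }
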
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1036d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 3d0bccc..ac316e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -37,18 +37,14 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.channels.FileChannel;
-import java.nio.channels.FileLock;
-import java.nio.channels.OverlappingFileLockException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -70,7 +66,6 @@ import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -538,31 +533,10 @@ public class TestDataNodeHotSwapVolumes {
   private static void assertFileLocksReleased(Collection<String> dirs)
       throws IOException {
     for (String dir: dirs) {
-      StorageLocation sl = StorageLocation.parse(dir);
-      File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK);
-      RandomAccessFile raf = null;
-      FileChannel channel = null;
-      FileLock lock = null;
       try {
-        raf = new RandomAccessFile(lockFile, "rws");
-        channel = raf.getChannel();
-        lock = channel.tryLock();
-        assertNotNull(String.format(
-          "Lock file at %s appears to be held by a different process.",
-          lockFile.getAbsolutePath()), lock);
-      } catch (OverlappingFileLockException e) {
-        fail(String.format("Must release lock file at %s.",
-          lockFile.getAbsolutePath()));
-      } finally {
-        if (lock != null) {
-          try {
-            lock.release();
-          } catch (IOException e) {
-            LOG.warn(String.format("I/O error releasing file lock %s.",
-              lockFile.getAbsolutePath()), e);
-          }
-        }
-        IOUtils.cleanup(null, channel, raf);
+        FsDatasetTestUtil.assertFileLockReleased(dir);
+      } catch (IOException e) {
+        LOG.warn(e);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1036d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
index f9e30e1..7ac9b65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
@@ -19,13 +19,24 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.nio.channels.OverlappingFileLockException;
 import java.util.Collection;
+import java.util.Random;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 public class FsDatasetTestUtil {
 
@@ -72,4 +83,36 @@ public class FsDatasetTestUtil {
     FsDatasetImpl fsDataset = ((FsDatasetImpl) dn.getFSDataset());
     ((FsDatasetImpl.LazyWriter) fsDataset.lazyWriter.getRunnable()).stop();
   }
+
+  /**
+   * Asserts that the storage lock file in the given directory has been
+   * released.  This method works by trying to acquire the lock file itself.  If
+   * locking fails here, then the main code must have failed to release it.
+   *
+   * @param dir the storage directory to check
+   * @throws IOException if there is an unexpected I/O error
+   */
+  public static void assertFileLockReleased(String dir) throws IOException {
+    StorageLocation sl = StorageLocation.parse(dir);
+    File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK);
+    try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
+        FileChannel channel = raf.getChannel()) {
+      FileLock lock = channel.tryLock();
+      assertNotNull(String.format(
+          "Lock file at %s appears to be held by a different process.",
+          lockFile.getAbsolutePath()), lock);
+      if (lock != null) {
+        try {
+          lock.release();
+        } catch (IOException e) {
+          FsDatasetImpl.LOG.warn(String.format("I/O error releasing file lock %s.",
+              lockFile.getAbsolutePath()), e);
+          throw e;
+        }
+      }
+    } catch (OverlappingFileLockException e) {
+      fail(String.format("Must release lock file at %s.",
+          lockFile.getAbsolutePath()));
+    }
+  }
 }
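
The helper above leans on try-with-resources so the RandomAccessFile and FileChannel
are closed even when tryLock throws. A minimal, standalone version of the same probe in
plain JDK (no Hadoop types; the lock-file path in main is a placeholder):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileChannel;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    public class LockProbe {
      /** Returns true if the lock could be acquired (and is released again). */
      static boolean lockIsFree(File lockFile) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
             FileChannel channel = raf.getChannel()) {
          FileLock lock = channel.tryLock();
          if (lock != null) {
            lock.release();
            return true;
          }
          return false;               // held by another process
        } catch (OverlappingFileLockException e) {
          return false;               // held elsewhere in this JVM
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(lockIsFree(new File("/tmp/in_use.lock")));
      }
    }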

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c1036d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 8c28033..3b47dd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -35,11 +35,13 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Matchers;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -57,12 +59,17 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyListOf;
+import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -291,4 +298,40 @@ public class TestFsDatasetImpl {
     assertFalse(volumeList.getVolumes().contains(brokenVolume));
     assertEquals(NUM_VOLUMES - 1, volumeList.getVolumes().size());
   }
+
+  @Test
+  public void testAddVolumeFailureReleasesInUseLock() throws IOException {
+    FsDatasetImpl spyDataset = spy(dataset);
+    FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
+    File badDir = new File(BASE_DIR, "bad");
+    badDir.mkdirs();
+    doReturn(mockVolume).when(spyDataset)
+        .createFsVolume(anyString(), any(File.class), any(StorageType.class));
+    doThrow(new IOException("Failed to getVolumeMap()"))
+      .when(mockVolume).getVolumeMap(
+        anyString(),
+        any(ReplicaMap.class),
+        any(RamDiskReplicaLruTracker.class));
+
+    Storage.StorageDirectory sd = createStorageDirectory(badDir);
+    sd.lock();
+    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
+    when(storage.prepareVolume(eq(datanode), eq(badDir),
+        Matchers.<List<NamespaceInfo>>any()))
+        .thenReturn(builder);
+
+    StorageLocation location = StorageLocation.parse(badDir.toString());
+    List<NamespaceInfo> nsInfos = Lists.newArrayList();
+    for (String bpid : BLOCK_POOL_IDS) {
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
+    }
+
+    try {
+      spyDataset.addVolume(location, nsInfos);
+      fail("Expect to throw MultipleIOException");
+    } catch (MultipleIOException e) {
+    }
+
+    FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
+  }
 }
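
The test above works by spying the real FsDatasetImpl and stubbing the newly extracted
createFsVolume factory, which is exactly why that method was pulled out and marked
@VisibleForTesting. A generic, hedged illustration of the technique with made-up types
(Widget and Part stand in for FsDatasetImpl and FsVolumeImpl; nothing here is from the
patch):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.spy;

    class Widget {
      Part createPart() {             // analogous to createFsVolume(...)
        return new Part();
      }
      void addPart() {
        createPart().activate();
      }
    }

    class Part {
      void activate() { }
    }

    class WidgetTestSketch {
      void testAddPartFailure() {
        Widget spyWidget = spy(new Widget());
        Part brokenPart = mock(Part.class);
        doReturn(brokenPart).when(spyWidget).createPart();
        doThrow(new RuntimeException("boom")).when(brokenPart).activate();
        try {
          spyWidget.addPart();        // fails inside the stubbed collaborator
        } catch (RuntimeException expected) {
          // production code under test would clean up (e.g. unlock) here
        }
      }
    }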


[34/49] hadoop git commit: MAPREDUCE-4815. Speed up FileOutputCommitter#commitJob for many output files. (Siqi Li via gera)

Posted by zj...@apache.org.
MAPREDUCE-4815. Speed up FileOutputCommitter#commitJob for many output files. (Siqi Li via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa92b764
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa92b764
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa92b764

Branch: refs/heads/YARN-2928
Commit: aa92b764a7ddb888d097121c4d610089a0053d11
Parents: a380643
Author: Gera Shegalov <ge...@apache.org>
Authored: Tue Mar 10 11:12:48 2015 -0700
Committer: Gera Shegalov <ge...@apache.org>
Committed: Tue Mar 10 11:32:08 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../lib/output/FileOutputCommitter.java         | 119 ++++++++++------
 .../src/main/resources/mapred-default.xml       |  54 ++++++++
 .../hadoop/mapred/TestFileOutputCommitter.java  | 134 +++++++++++++++----
 .../lib/output/TestFileOutputCommitter.java     | 116 ++++++++++++++--
 5 files changed, 349 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa92b764/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index eecf022..0bbe85c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -348,6 +348,9 @@ Release 2.7.0 - UNRELEASED
 
     MAPREDUCE-6059. Speed up history server startup time (Siqi Li via aw)
 
+    MAPREDUCE-4815. Speed up FileOutputCommitter#commitJob for many output
+    files. (Siqi Li via gera)
+
   BUG FIXES
 
     MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa92b764/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index 55252f0..28a8548 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce.lib.output;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
@@ -25,6 +26,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -57,10 +59,14 @@ public class FileOutputCommitter extends OutputCommitter {
   @Deprecated
   protected static final String TEMP_DIR_NAME = PENDING_DIR_NAME;
   public static final String SUCCEEDED_FILE_NAME = "_SUCCESS";
-  public static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER = 
-    "mapreduce.fileoutputcommitter.marksuccessfuljobs";
+  public static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER =
+      "mapreduce.fileoutputcommitter.marksuccessfuljobs";
+  public static final String FILEOUTPUTCOMMITTER_ALGORITHM_VERSION =
+      "mapreduce.fileoutputcommitter.algorithm.version";
+  public static final int FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT = 1;
   private Path outputPath = null;
   private Path workPath = null;
+  private final int algorithmVersion;
 
   /**
    * Create a file output committer
@@ -87,6 +93,14 @@ public class FileOutputCommitter extends OutputCommitter {
   @Private
   public FileOutputCommitter(Path outputPath, 
                              JobContext context) throws IOException {
+    Configuration conf = context.getConfiguration();
+    algorithmVersion =
+        conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+                    FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
+    LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
+    if (algorithmVersion != 1 && algorithmVersion != 2) {
+      throw new IOException("Only 1 or 2 algorithm version is supported");
+    }
     if (outputPath != null) {
       FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
       this.outputPath = fs.makeQualified(outputPath);
@@ -248,14 +262,14 @@ public class FileOutputCommitter extends OutputCommitter {
     return new Path(getJobAttemptPath(appAttemptId, out),
         String.valueOf(context.getTaskAttemptID().getTaskID()));
   }
-  
+
   private static class CommittedTaskFilter implements PathFilter {
     @Override
     public boolean accept(Path path) {
       return !PENDING_DIR_NAME.equals(path.getName());
     }
   }
-  
+
   /**
    * Get a list of all paths where output from committed tasks are stored.
    * @param context the context of the current job
@@ -268,7 +282,7 @@ public class FileOutputCommitter extends OutputCommitter {
     FileSystem fs = jobAttemptPath.getFileSystem(context.getConfiguration());
     return fs.listStatus(jobAttemptPath, new CommittedTaskFilter());
   }
-  
+
   /**
    * Get the directory that the task should write results into.
    * @return the work directory
@@ -295,7 +309,7 @@ public class FileOutputCommitter extends OutputCommitter {
       LOG.warn("Output Path is null in setupJob()");
     }
   }
-  
+
   /**
    * The job has completed so move all committed tasks to the final output dir.
    * Delete the temporary directory, including all of the work directories.
@@ -306,8 +320,11 @@ public class FileOutputCommitter extends OutputCommitter {
     if (hasOutputPath()) {
       Path finalOutput = getOutputPath();
       FileSystem fs = finalOutput.getFileSystem(context.getConfiguration());
-      for(FileStatus stat: getAllCommittedTaskPaths(context)) {
-        mergePaths(fs, stat, finalOutput);
+
+      if (algorithmVersion == 1) {
+        for (FileStatus stat: getAllCommittedTaskPaths(context)) {
+          mergePaths(fs, stat, finalOutput);
+        }
       }
 
       // delete the _temporary folder and create a _done file in the o/p folder
@@ -417,27 +434,42 @@ public class FileOutputCommitter extends OutputCommitter {
 
   @Private
   public void commitTask(TaskAttemptContext context, Path taskAttemptPath) 
-  throws IOException {
+      throws IOException {
+
     TaskAttemptID attemptId = context.getTaskAttemptID();
     if (hasOutputPath()) {
       context.progress();
       if(taskAttemptPath == null) {
         taskAttemptPath = getTaskAttemptPath(context);
       }
-      Path committedTaskPath = getCommittedTaskPath(context);
       FileSystem fs = taskAttemptPath.getFileSystem(context.getConfiguration());
-      if (fs.exists(taskAttemptPath)) {
-        if(fs.exists(committedTaskPath)) {
-          if(!fs.delete(committedTaskPath, true)) {
-            throw new IOException("Could not delete " + committedTaskPath);
+      FileStatus taskAttemptDirStatus;
+      try {
+        taskAttemptDirStatus = fs.getFileStatus(taskAttemptPath);
+      } catch (FileNotFoundException e) {
+        taskAttemptDirStatus = null;
+      }
+
+      if (taskAttemptDirStatus != null) {
+        if (algorithmVersion == 1) {
+          Path committedTaskPath = getCommittedTaskPath(context);
+          if (fs.exists(committedTaskPath)) {
+             if (!fs.delete(committedTaskPath, true)) {
+               throw new IOException("Could not delete " + committedTaskPath);
+             }
           }
+          if (!fs.rename(taskAttemptPath, committedTaskPath)) {
+            throw new IOException("Could not rename " + taskAttemptPath + " to "
+                + committedTaskPath);
+          }
+          LOG.info("Saved output of task '" + attemptId + "' to " +
+              committedTaskPath);
+        } else {
+          // directly merge everything from taskAttemptPath to output directory
+          mergePaths(fs, taskAttemptDirStatus, outputPath);
+          LOG.info("Saved output of task '" + attemptId + "' to " +
+              outputPath);
         }
-        if(!fs.rename(taskAttemptPath, committedTaskPath)) {
-          throw new IOException("Could not rename " + taskAttemptPath + " to "
-              + committedTaskPath);
-        }
-        LOG.info("Saved output of task '" + attemptId + "' to " + 
-            committedTaskPath);
       } else {
         LOG.warn("No Output found for " + attemptId);
       }
@@ -511,32 +543,43 @@ public class FileOutputCommitter extends OutputCommitter {
         throw new IOException ("Cannot recover task output for first attempt...");
       }
 
-      Path committedTaskPath = getCommittedTaskPath(context);
       Path previousCommittedTaskPath = getCommittedTaskPath(
           previousAttempt, context);
-      FileSystem fs = committedTaskPath.getFileSystem(context.getConfiguration());
+      FileSystem fs = previousCommittedTaskPath.getFileSystem(context.getConfiguration());
 
-      LOG.debug("Trying to recover task from " + previousCommittedTaskPath 
-          + " into " + committedTaskPath);
-      if (fs.exists(previousCommittedTaskPath)) {
-        if(fs.exists(committedTaskPath)) {
-          if(!fs.delete(committedTaskPath, true)) {
-            throw new IOException("Could not delete "+committedTaskPath);
+      LOG.debug("Trying to recover task from " + previousCommittedTaskPath);
+      if (algorithmVersion == 1) {
+        if (fs.exists(previousCommittedTaskPath)) {
+          Path committedTaskPath = getCommittedTaskPath(context);
+          if (fs.exists(committedTaskPath)) {
+            if (!fs.delete(committedTaskPath, true)) {
+              throw new IOException("Could not delete "+committedTaskPath);
+            }
           }
+          //Rename can fail if the parent directory does not yet exist.
+          Path committedParent = committedTaskPath.getParent();
+          fs.mkdirs(committedParent);
+          if (!fs.rename(previousCommittedTaskPath, committedTaskPath)) {
+            throw new IOException("Could not rename " + previousCommittedTaskPath +
+                " to " + committedTaskPath);
+          }
+        } else {
+            LOG.warn(attemptId+" had no output to recover.");
         }
-        //Rename can fail if the parent directory does not yet exist.
-        Path committedParent = committedTaskPath.getParent();
-        fs.mkdirs(committedParent);
-        if(!fs.rename(previousCommittedTaskPath, committedTaskPath)) {
-          throw new IOException("Could not rename " + previousCommittedTaskPath +
-              " to " + committedTaskPath);
-        }
-        LOG.info("Saved output of " + attemptId + " to " + committedTaskPath);
       } else {
-        LOG.warn(attemptId+" had no output to recover.");
+        // essentially a no-op, but for backwards compatibility
+        // after an upgrade to the new FileOutputCommitter,
+        // check whether any output is left in committedTaskPath
+        if (fs.exists(previousCommittedTaskPath)) {
+          LOG.info("Recovering task for upgrading scenario, moving files from "
+              + previousCommittedTaskPath + " to " + outputPath);
+          FileStatus from = fs.getFileStatus(previousCommittedTaskPath);
+          mergePaths(fs, from, outputPath);
+        }
+        LOG.info("Done recovering task " + attemptId);
       }
     } else {
       LOG.warn("Output Path is null in recoverTask()");
     }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa92b764/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 1162183..d7bec9c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1318,6 +1318,60 @@
 </property>
 
 <property>
+  <name>mapreduce.fileoutputcommitter.algorithm.version</name>
+  <value>1</value>
+  <description>The file output committer algorithm version.
+  Valid algorithm version numbers: 1 or 2.
+  The default is 1, which is the original algorithm.
+
+  In algorithm version 1,
+
+  1. commitTask will rename directory
+  $joboutput/_temporary/$appAttemptID/_temporary/$taskAttemptID/
+  to
+  $joboutput/_temporary/$appAttemptID/$taskID/
+
+  2. recoverTask will also do a rename
+  $joboutput/_temporary/$appAttemptID/$taskID/
+  to
+  $joboutput/_temporary/($appAttemptID + 1)/$taskID/
+
+  3. commitJob will merge every task output file in
+  $joboutput/_temporary/$appAttemptID/$taskID/
+  to
+  $joboutput/, then it will delete $joboutput/_temporary/
+  and write $joboutput/_SUCCESS
+
+  Algorithm version 1 has a performance regression, discussed in
+  MAPREDUCE-4815: if a job generates many files to commit, then the
+  commitJob method call at the end of the job can take minutes,
+  because the commit is single-threaded and waits until all
+  tasks have completed before commencing.
+
+  Algorithm version 2 changes the behavior of commitTask,
+  recoverTask, and commitJob:
+
+  1. commitTask will rename all files in
+  $joboutput/_temporary/$appAttemptID/_temporary/$taskAttemptID/
+  to $joboutput/
+
+  2. recoverTask does not actually need to do anything, but for the
+  upgrade-from-version-1-to-version-2 case it will check whether there
+  are any files in
+  $joboutput/_temporary/($appAttemptID - 1)/$taskID/
+  and rename them to $joboutput/
+
+  3. commitJob can simply delete $joboutput/_temporary and write
+  $joboutput/_SUCCESS
+
+  This algorithm reduces the output commit time for
+  large jobs by having the tasks commit directly to the final
+  output directory as they complete, leaving commitJob with
+  very little to do.
+  </description>
+</property>
+
+<property>
   <name>yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms</name>
   <value>1000</value>
   <description>The interval in ms at which the MR AppMaster should send

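A hedged sketch of opting a single job into the new algorithm described above, using the
constant added to FileOutputCommitter in this commit (per-job override is assumed to be
the intended usage; a cluster-wide default would instead go into mapred-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;

    final class CommitterVersionExample {
      static Job newJobWithV2Committer(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "committer-v2-example");
        // Default stays 1 (the original two-step rename algorithm).
        job.getConfiguration().setInt(
            FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 2);
        return job;
      }
    }
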
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa92b764/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
index 0859571..3207a71 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
@@ -50,7 +50,6 @@ public class TestFileOutputCommitter extends TestCase {
   private Text val1 = new Text("val1");
   private Text val2 = new Text("val2");
 
-  
   private void writeOutput(RecordWriter theRecordWriter,
       TaskAttemptContext context) throws IOException, InterruptedException {
     NullWritable nullWritable = NullWritable.get();
@@ -83,12 +82,16 @@ public class TestFileOutputCommitter extends TestCase {
       theRecordWriter.close(null);
     }
   }
-  
-  public void testRecovery() throws Exception {
-    JobConf conf = new JobConf();
+
+  private void testRecoveryInternal(int commitVersion, int recoveryVersion)
+      throws Exception {
+    JobConf conf = new JobConf();
     FileOutputFormat.setOutputPath(conf, outDir);
     conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
     conf.setInt(MRConstants.APPLICATION_ATTEMPT_ID, 1);
+    conf.setInt(org.apache.hadoop.mapreduce.lib.output.
+        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        commitVersion);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter();
@@ -99,7 +102,7 @@ public class TestFileOutputCommitter extends TestCase {
 
     // write output
     TextOutputFormat theOutputFormat = new TextOutputFormat();
-    RecordWriter theRecordWriter = 
+    RecordWriter theRecordWriter =
         theOutputFormat.getRecordWriter(null, conf, partFile, null);
     writeOutput(theRecordWriter, tContext);
 
@@ -107,31 +110,59 @@ public class TestFileOutputCommitter extends TestCase {
     if(committer.needsTaskCommit(tContext)) {
       committer.commitTask(tContext);
     }
+
     Path jobTempDir1 = committer.getCommittedTaskPath(tContext);
     File jtd1 = new File(jobTempDir1.toUri().getPath());
-    assertTrue(jtd1.exists());
-    validateContent(jobTempDir1);        
-    
-    //now while running the second app attempt, 
+    if (commitVersion == 1) {
+      assertTrue("Version 1 commits to temporary dir " + jtd1, jtd1.exists());
+      validateContent(jobTempDir1);
+    } else {
+      assertFalse("Version 2 commits to output dir " + jtd1, jtd1.exists());
+    }
+
+    //now while running the second app attempt,
     //recover the task output from first attempt
     JobConf conf2 = new JobConf(conf);
     conf2.set(JobContext.TASK_ATTEMPT_ID, attempt);
     conf2.setInt(MRConstants.APPLICATION_ATTEMPT_ID, 2);
+    conf2.setInt(org.apache.hadoop.mapreduce.lib.output.
+        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        recoveryVersion);
     JobContext jContext2 = new JobContextImpl(conf2, taskID.getJobID());
     TaskAttemptContext tContext2 = new TaskAttemptContextImpl(conf2, taskID);
     FileOutputCommitter committer2 = new FileOutputCommitter();
     committer2.setupJob(jContext2);
-    Path jobTempDir2 = committer2.getCommittedTaskPath(tContext2);
-    
+
     committer2.recoverTask(tContext2);
+
+    Path jobTempDir2 = committer2.getCommittedTaskPath(tContext2);
     File jtd2 = new File(jobTempDir2.toUri().getPath());
-    assertTrue(jtd2.exists());
-    validateContent(jobTempDir2);
-    
+    if (recoveryVersion == 1) {
+      assertTrue("Version 1 recovers to " + jtd2, jtd2.exists());
+      validateContent(jobTempDir2);
+    } else {
+        assertFalse("Version 2 commits to output dir " + jtd2, jtd2.exists());
+        if (commitVersion == 1) {
+          assertTrue("Version 2  recovery moves to output dir from "
+              + jtd1 , jtd1.list().length == 0);
+        }
+      }
+
     committer2.commitJob(jContext2);
     validateContent(outDir);
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
+  public void testRecoveryV1() throws Exception {
+    testRecoveryInternal(1, 1);
+  }
+
+  public void testRecoveryV2() throws Exception {
+    testRecoveryInternal(2, 2);
+  }
+
+  public void testRecoveryUpgradeV1V2() throws Exception {
+    testRecoveryInternal(1, 2);
+  }
 
   private void validateContent(Path dir) throws IOException {
     File fdir = new File(dir.toUri().getPath());
@@ -170,11 +201,13 @@ public class TestFileOutputCommitter extends TestCase {
     assert(fileCount > 0);
     assert(dataFileFound && indexFileFound);
   }
-  
-  public void testCommitter() throws Exception {
+
+  private void testCommitterInternal(int version) throws Exception {
     JobConf conf = new JobConf();
     FileOutputFormat.setOutputPath(conf, outDir);
     conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(org.apache.hadoop.mapreduce.lib.output.
+        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter();
@@ -200,21 +233,33 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
-  public void testMapFileOutputCommitter() throws Exception {
+  public void testCommitterV1() throws Exception {
+    testCommitterInternal(1);
+  }
+
+  public void testCommitterV2() throws Exception {
+    testCommitterInternal(2);
+  }
+
+  private void testMapFileOutputCommitterInternal(int version)
+      throws Exception {
     JobConf conf = new JobConf();
     FileOutputFormat.setOutputPath(conf, outDir);
     conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(org.apache.hadoop.mapreduce.lib.output.
+        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
-    FileOutputCommitter committer = new FileOutputCommitter();    
-    
+    FileOutputCommitter committer = new FileOutputCommitter();
+
     // setup
     committer.setupJob(jContext);
     committer.setupTask(tContext);
 
     // write output
     MapFileOutputFormat theOutputFormat = new MapFileOutputFormat();
-    RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(null, conf, partFile, null);
+    RecordWriter theRecordWriter =
+        theOutputFormat.getRecordWriter(null, conf, partFile, null);
     writeMapFileOutput(theRecordWriter, tContext);
 
     // do commit
@@ -227,11 +272,29 @@ public class TestFileOutputCommitter extends TestCase {
     validateMapFileOutputContent(FileSystem.get(conf), outDir);
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
-  
-  public void testMapOnlyNoOutput() throws Exception {
+
+  public void testMapFileOutputCommitterV1() throws Exception {
+    testMapFileOutputCommitterInternal(1);
+  }
+
+  public void testMapFileOutputCommitterV2() throws Exception {
+    testMapFileOutputCommitterInternal(2);
+  }
+
+  public void testMapOnlyNoOutputV1() throws Exception {
+    testMapOnlyNoOutputInternal(1);
+  }
+
+  public void testMapOnlyNoOutputV2() throws Exception {
+    testMapOnlyNoOutputInternal(2);
+  }
+
+  private void testMapOnlyNoOutputInternal(int version) throws Exception {
     JobConf conf = new JobConf();
     //This is not set on purpose. FileOutputFormat.setOutputPath(conf, outDir);
     conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(org.apache.hadoop.mapreduce.lib.output.
+        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter();    
@@ -249,11 +312,14 @@ public class TestFileOutputCommitter extends TestCase {
     // validate output
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
-  
-  public void testAbort() throws IOException, InterruptedException {
+
+  private void testAbortInternal(int version)
+      throws IOException, InterruptedException {
     JobConf conf = new JobConf();
     FileOutputFormat.setOutputPath(conf, outDir);
     conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(org.apache.hadoop.mapreduce.lib.output.
+        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter();
@@ -283,6 +349,14 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(out);
   }
 
+  public void testAbortV1() throws Exception {
+    testAbortInternal(1);
+  }
+
+  public void testAbortV2() throws Exception {
+    testAbortInternal(2);
+  }
+
   public static class FakeFileSystem extends RawLocalFileSystem {
     public FakeFileSystem() {
       super();
@@ -299,11 +373,14 @@ public class TestFileOutputCommitter extends TestCase {
   }
 
   
-  public void testFailAbort() throws IOException, InterruptedException {
+  private void testFailAbortInternal(int version)
+      throws IOException, InterruptedException {
     JobConf conf = new JobConf();
     conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
     conf.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
     conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(org.apache.hadoop.mapreduce.lib.output.
+        FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, version);
     conf.setInt(MRConstants.APPLICATION_ATTEMPT_ID, 1);
     FileOutputFormat.setOutputPath(conf, outDir);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
@@ -353,6 +430,13 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  public void testFailAbortV1() throws Exception {
+    testFailAbortInternal(1);
+  }
+
+  public void testFailAbortV2() throws Exception {
+    testFailAbortInternal(2);
+  }
   public static String slurp(File f) throws IOException {
     int len = (int) f.length();
     byte[] buf = new byte[len];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa92b764/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index d20480e..8f60300 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
@@ -109,12 +109,15 @@ public class TestFileOutputCommitter extends TestCase {
     }
   }
   
-  public void testRecovery() throws Exception {
+  private void testRecoveryInternal(int commitVersion, int recoveryVersion)
+      throws Exception {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
     Configuration conf = job.getConfiguration();
     conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
     conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
+    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        commitVersion);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
@@ -130,32 +133,59 @@ public class TestFileOutputCommitter extends TestCase {
 
     // do commit
     committer.commitTask(tContext);
+
     Path jobTempDir1 = committer.getCommittedTaskPath(tContext);
     File jtd = new File(jobTempDir1.toUri().getPath());
-    assertTrue(jtd.exists());
-    validateContent(jtd);    
-    
+    if (commitVersion == 1) {
+      assertTrue("Version 1 commits to temporary dir " + jtd, jtd.exists());
+      validateContent(jtd);
+    } else {
+      assertFalse("Version 2 commits to output dir " + jtd, jtd.exists());
+    }
+
     //now while running the second app attempt, 
     //recover the task output from first attempt
     Configuration conf2 = job.getConfiguration();
     conf2.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
     conf2.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 2);
+    conf2.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        recoveryVersion);
     JobContext jContext2 = new JobContextImpl(conf2, taskID.getJobID());
     TaskAttemptContext tContext2 = new TaskAttemptContextImpl(conf2, taskID);
     FileOutputCommitter committer2 = new FileOutputCommitter(outDir, tContext2);
     committer2.setupJob(tContext2);
     Path jobTempDir2 = committer2.getCommittedTaskPath(tContext2);
     File jtd2 = new File(jobTempDir2.toUri().getPath());
-    
+
     committer2.recoverTask(tContext2);
-    assertTrue(jtd2.exists());
-    validateContent(jtd2);
-    
+    if (recoveryVersion == 1) {
+      assertTrue("Version 1 recovers to " + jtd2, jtd2.exists());
+      validateContent(jtd2);
+    } else {
+      assertFalse("Version 2 commits to output dir " + jtd2, jtd2.exists());
+      if (commitVersion == 1) {
+        assertTrue("Version 2  recovery moves to output dir from "
+            + jtd , jtd.list().length == 0);
+      }
+    }
+
     committer2.commitJob(jContext2);
     validateContent(outDir);
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  public void testRecoveryV1() throws Exception {
+    testRecoveryInternal(1, 1);
+  }
+
+  public void testRecoveryV2() throws Exception {
+    testRecoveryInternal(2, 2);
+  }
+
+  public void testRecoveryUpgradeV1V2() throws Exception {
+    testRecoveryInternal(1, 2);
+  }
+
   private void validateContent(Path dir) throws IOException {
     validateContent(new File(dir.toUri().getPath()));
   }
@@ -197,12 +227,14 @@ public class TestFileOutputCommitter extends TestCase {
     assert(fileCount > 0);
     assert(dataFileFound && indexFileFound);
   }
-  
-  public void testCommitter() throws Exception {
-    Job job = Job.getInstance();
+
+  private void testCommitterInternal(int version) throws Exception {
+    Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
     Configuration conf = job.getConfiguration();
     conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        version);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
@@ -225,11 +257,22 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
-  public void testMapFileOutputCommitter() throws Exception {
+  public void testCommitterV1() throws Exception {
+    testCommitterInternal(1);
+  }
+
+  public void testCommitterV2() throws Exception {
+    testCommitterInternal(2);
+  }
+
+  private void testMapFileOutputCommitterInternal(int version)
+      throws Exception {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
     Configuration conf = job.getConfiguration();
     conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        version);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());    
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
@@ -251,12 +294,38 @@ public class TestFileOutputCommitter extends TestCase {
     validateMapFileOutputContent(FileSystem.get(job.getConfiguration()), outDir);
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
+
+  public void testMapFileOutputCommitterV1() throws Exception {
+    testMapFileOutputCommitterInternal(1);
+  }
   
-  public void testAbort() throws IOException, InterruptedException {
+  public void testMapFileOutputCommitterV2() throws Exception {
+    testMapFileOutputCommitterInternal(2);
+  }
+
+  public void testInvalidVersionNumber() throws IOException {
+    Job job = Job.getInstance();
+    FileOutputFormat.setOutputPath(job, outDir);
+    Configuration conf = job.getConfiguration();
+    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 3);
+    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
+    try {
+      new FileOutputCommitter(outDir, tContext);
+      fail("should've thrown an exception!");
+    } catch (IOException e) {
+      //test passed
+    }
+  }
+
+  private void testAbortInternal(int version)
+      throws IOException, InterruptedException {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
     Configuration conf = job.getConfiguration();
     conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
+    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        version);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
     FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);
@@ -285,6 +354,14 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  public void testAbortV1() throws IOException, InterruptedException {
+    testAbortInternal(1);
+  }
+
+  public void testAbortV2() throws IOException, InterruptedException {
+    testAbortInternal(2);
+  }
+
   public static class FakeFileSystem extends RawLocalFileSystem {
     public FakeFileSystem() {
       super();
@@ -301,13 +378,16 @@ public class TestFileOutputCommitter extends TestCase {
   }
 
   
-  public void testFailAbort() throws IOException, InterruptedException {
+  private void testFailAbortInternal(int version)
+      throws IOException, InterruptedException {
     Job job = Job.getInstance();
     Configuration conf = job.getConfiguration();
     conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
     conf.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
     conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
     conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 1);
+    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
+        version);
     FileOutputFormat.setOutputPath(job, outDir);
     JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
     TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
@@ -353,6 +433,14 @@ public class TestFileOutputCommitter extends TestCase {
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  public void testFailAbortV1() throws Exception {
+    testFailAbortInternal(1);
+  }
+
+  public void testFailAbortV2() throws Exception {
+    testFailAbortInternal(2);
+  }
+
   public static String slurp(File f) throws IOException {
     int len = (int) f.length();
     byte[] buf = new byte[len];
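
For readers following the FileOutputCommitter changes tested above: the commit algorithm is selected per job through FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION (the "mapreduce.fileoutputcommitter.algorithm.version" property), exactly as the new V1/V2 test variants do. Below is a minimal sketch of a job that opts into the version-2 algorithm; the job name, the output path taken from args[0], and the elided mapper/reducer setup are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class CommitterV2Example {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Version 2 moves task output into the final output directory at task
        // commit time; version 1 (the pre-existing behaviour) merges it at job
        // commit time.
        conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 2);

        Job job = Job.getInstance(conf, "committer-v2-example");
        FileOutputFormat.setOutputPath(job, new Path(args[0]));
        // ... input paths, mapper/reducer and output formats elided ...
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }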


[19/49] hadoop git commit: Revert "HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang."

Posted by zj...@apache.org.
Revert "HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang."

This reverts commit d799fbe1ccf8752c44f087e34b5f400591d3b5bd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5578e22c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5578e22c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5578e22c

Branch: refs/heads/YARN-2928
Commit: 5578e22ce9ca6cceb510c22ec307b1869ce1a7c5
Parents: d799fbe
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Sun Mar 8 20:54:43 2015 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Sun Mar 8 20:54:43 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ipc/Server.java         | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5578e22c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index d2d61b3..893e0eb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,15 +1324,10 @@ public abstract class Server {
           saslResponse = processSaslMessage(saslMessage);
         } catch (IOException e) {
           rpcMetrics.incrAuthenticationFailures();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(StringUtils.stringifyException(e));
-          }
           // attempting user could be null
-          IOException tce = (IOException) getTrueCause(e);
           AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-              + attemptingUser + " (" + e.getLocalizedMessage()
-              + ") with true cause: (" + tce.getLocalizedMessage() + ")");
-          throw tce;
+              + attemptingUser + " (" + e.getLocalizedMessage() + ")");
+          throw (IOException) getTrueCause(e);
         }
         
         if (saslServer != null && saslServer.isComplete()) {
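
The revert above restores the shorter audit WARN and drops both the unwrapped "true cause" and the debug-guarded stack trace. As a generic illustration of that debug-guard idiom (not the actual Server internals; the class, method and message text here are placeholders), the pattern looks like this:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class AuthFailureLogging {
      private static final Log LOG = LogFactory.getLog(AuthFailureLogging.class);

      // Emit a one-line WARN for the audit trail and keep the full stack
      // trace behind a DEBUG guard so it is not printed in normal operation.
      static void logAuthFailure(String client, Exception e) {
        LOG.warn("Auth failed for " + client + " (" + e.getLocalizedMessage() + ")");
        if (LOG.isDebugEnabled()) {
          LOG.debug("Full stack trace for auth failure from " + client, e);
        }
      }
    }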


[24/49] hadoop git commit: YARN-3287. Made TimelineClient put methods do as the correct login context. Contributed by Daryn Sharp and Jonathan Eagles.

Posted by zj...@apache.org.
YARN-3287. Made TimelineClient put methods do as the correct login context. Contributed by Daryn Sharp and Jonathan Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6e05c5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6e05c5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6e05c5e

Branch: refs/heads/YARN-2928
Commit: d6e05c5ee26feefc17267b7c9db1e2a3dbdef117
Parents: 3241fc2
Author: Zhijie Shen <zj...@apache.org>
Authored: Mon Mar 9 13:54:36 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 9 13:54:36 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../client/api/impl/TimelineClientImpl.java     |  82 +++----
 .../TestTimelineAuthenticationFilter.java       | 221 +++++++++----------
 3 files changed, 135 insertions(+), 171 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e05c5e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index da8b02e..1894552 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -737,6 +737,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3275. CapacityScheduler: Preemption happening on non-preemptable
     queues (Eric Payne via jlowe)
 
+    YARN-3287. Made TimelineClient put methods do as the correct login context.
+    (Daryn Sharp and Jonathan Eagles via zjshen)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e05c5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index c05d65b..df6c7a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
@@ -108,6 +109,8 @@ public class TimelineClientImpl extends TimelineClient {
   private DelegationTokenAuthenticator authenticator;
   private DelegationTokenAuthenticatedURL.Token token;
   private URI resURI;
+  private UserGroupInformation authUgi;
+  private String doAsUser;
 
   @Private
   @VisibleForTesting
@@ -252,6 +255,15 @@ public class TimelineClientImpl extends TimelineClient {
   }
 
   protected void serviceInit(Configuration conf) throws Exception {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    UserGroupInformation realUgi = ugi.getRealUser();
+    if (realUgi != null) {
+      authUgi = realUgi;
+      doAsUser = ugi.getShortUserName();
+    } else {
+      authUgi = ugi;
+      doAsUser = null;
+    }
     ClientConfig cc = new DefaultClientConfig();
     cc.getClasses().add(YarnJacksonJaxbJsonProvider.class);
     connConfigurator = newConnConfigurator(conf);
@@ -301,16 +313,20 @@ public class TimelineClientImpl extends TimelineClient {
     doPosting(domain, "domain");
   }
 
-  private ClientResponse doPosting(Object obj, String path) throws IOException, YarnException {
+  private ClientResponse doPosting(final Object obj, final String path)
+      throws IOException, YarnException {
     ClientResponse resp;
     try {
-      resp = doPostingObject(obj, path);
-    } catch (RuntimeException re) {
-      // runtime exception is expected if the client cannot connect the server
-      String msg =
-          "Failed to get the response from the timeline server.";
-      LOG.error(msg, re);
-      throw re;
+      resp = authUgi.doAs(new PrivilegedExceptionAction<ClientResponse>() {
+        @Override
+        public ClientResponse run() throws Exception {
+          return doPostingObject(obj, path);
+        }
+      });
+    } catch (UndeclaredThrowableException e) {
+      throw new IOException(e.getCause());
+    } catch (InterruptedException ie) {
+      throw new IOException(ie);
     }
     if (resp == null ||
         resp.getClientResponseStatus() != ClientResponse.Status.OK) {
@@ -331,11 +347,6 @@ public class TimelineClientImpl extends TimelineClient {
   @Override
   public Token<TimelineDelegationTokenIdentifier> getDelegationToken(
       final String renewer) throws IOException, YarnException {
-    boolean isProxyAccess =
-        UserGroupInformation.getCurrentUser().getAuthenticationMethod()
-        == UserGroupInformation.AuthenticationMethod.PROXY;
-    final String doAsUser = isProxyAccess ?
-        UserGroupInformation.getCurrentUser().getShortUserName() : null;
     PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>> getDTAction =
         new PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>>() {
 
@@ -357,11 +368,6 @@ public class TimelineClientImpl extends TimelineClient {
   public long renewDelegationToken(
       final Token<TimelineDelegationTokenIdentifier> timelineDT)
           throws IOException, YarnException {
-    boolean isProxyAccess =
-        UserGroupInformation.getCurrentUser().getAuthenticationMethod()
-        == UserGroupInformation.AuthenticationMethod.PROXY;
-    final String doAsUser = isProxyAccess ?
-        UserGroupInformation.getCurrentUser().getShortUserName() : null;
     boolean useHttps = YarnConfiguration.useHttps(this.getConfig());
     final String scheme = useHttps ? "https" : "http";
     final InetSocketAddress address = SecurityUtil.getTokenServiceAddr(timelineDT);
@@ -393,11 +399,6 @@ public class TimelineClientImpl extends TimelineClient {
   public void cancelDelegationToken(
       final Token<TimelineDelegationTokenIdentifier> timelineDT)
           throws IOException, YarnException {
-    boolean isProxyAccess =
-        UserGroupInformation.getCurrentUser().getAuthenticationMethod()
-        == UserGroupInformation.AuthenticationMethod.PROXY;
-    final String doAsUser = isProxyAccess ?
-        UserGroupInformation.getCurrentUser().getShortUserName() : null;
     boolean useHttps = YarnConfiguration.useHttps(this.getConfig());
     final String scheme = useHttps ? "https" : "http";
     final InetSocketAddress address = SecurityUtil.getTokenServiceAddr(timelineDT);
@@ -433,15 +434,9 @@ public class TimelineClientImpl extends TimelineClient {
       @Override
       public Object run() throws IOException {
         // Try pass the request, if fail, keep retrying
-        boolean isProxyAccess =
-            UserGroupInformation.getCurrentUser().getAuthenticationMethod()
-            == UserGroupInformation.AuthenticationMethod.PROXY;
-        UserGroupInformation callerUGI = isProxyAccess ?
-            UserGroupInformation.getCurrentUser().getRealUser()
-            : UserGroupInformation.getCurrentUser();
-        callerUGI.checkTGTAndReloginFromKeytab();
+        authUgi.checkTGTAndReloginFromKeytab();
         try {
-          return callerUGI.doAs(action);
+          return authUgi.doAs(action);
         } catch (UndeclaredThrowableException e) {
           throw new IOException(e.getCause());
         } catch (InterruptedException e) {
@@ -481,28 +476,15 @@ public class TimelineClientImpl extends TimelineClient {
 
     @Override
     public HttpURLConnection getHttpURLConnection(final URL url) throws IOException {
-      boolean isProxyAccess =
-          UserGroupInformation.getCurrentUser().getAuthenticationMethod()
-          == UserGroupInformation.AuthenticationMethod.PROXY;
-      UserGroupInformation callerUGI = isProxyAccess ?
-          UserGroupInformation.getCurrentUser().getRealUser()
-          : UserGroupInformation.getCurrentUser();
-      final String doAsUser = isProxyAccess ?
-          UserGroupInformation.getCurrentUser().getShortUserName() : null;
-      callerUGI.checkTGTAndReloginFromKeytab();
+      authUgi.checkTGTAndReloginFromKeytab();
       try {
-        return callerUGI.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
-          @Override
-          public HttpURLConnection run() throws Exception {
-            return new DelegationTokenAuthenticatedURL(
-                authenticator, connConfigurator).openConnection(url, token,
-                doAsUser);
-          }
-        });
+        return new DelegationTokenAuthenticatedURL(
+            authenticator, connConfigurator).openConnection(url, token,
+              doAsUser);
       } catch (UndeclaredThrowableException e) {
         throw new IOException(e.getCause());
-      } catch (InterruptedException e) {
-        throw new IOException(e);
+      } catch (AuthenticationException ae) {
+        throw new IOException(ae);
       }
     }
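
The heart of this patch is visible in serviceInit above: resolve the authenticating UGI once (the real user when running as a proxy), remember the effective user for doAs, and wrap every HTTP call in authUgi.doAs(...). A standalone sketch of that pattern follows, assuming an already logged-in user; callRemoteService is a placeholder standing in for the actual Jersey/HTTP request.

    import java.io.IOException;
    import java.lang.reflect.UndeclaredThrowableException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class LoginContextCall {
      private UserGroupInformation authUgi;
      private String doAsUser;

      // Mirror of the init logic: authenticate as the real (login) user and
      // carry the proxied user's short name separately for doAs.
      void init() throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        UserGroupInformation realUgi = ugi.getRealUser();
        if (realUgi != null) {
          authUgi = realUgi;
          doAsUser = ugi.getShortUserName();
        } else {
          authUgi = ugi;
          doAsUser = null;
        }
      }

      // Placeholder for the real remote call.
      String callRemoteService(String payload, String doAs) {
        return "ok";
      }

      String post(final String payload) throws IOException {
        try {
          // Run the remote call inside the authenticating user's login context.
          return authUgi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
              return callRemoteService(payload, doAsUser);
            }
          });
        } catch (UndeclaredThrowableException e) {
          throw new IOException(e.getCause());
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    }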
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e05c5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
index 53d8c81..c93e8f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
@@ -47,9 +48,9 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
 import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
-import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -77,20 +78,19 @@ public class TestTimelineAuthenticationFilter {
     return Arrays.asList(new Object[][] { { false }, { true } });
   }
 
-  private MiniKdc testMiniKDC;
-  private String keystoresDir;
-  private String sslConfDir;
-  private ApplicationHistoryServer testTimelineServer;
-  private Configuration conf;
-  private TimelineClient client;
-  private boolean withSsl;
+  private static MiniKdc testMiniKDC;
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static ApplicationHistoryServer testTimelineServer;
+  private static Configuration conf;
+  private static boolean withSsl;
 
   public TestTimelineAuthenticationFilter(boolean withSsl) {
-    this.withSsl = withSsl;
+    TestTimelineAuthenticationFilter.withSsl = withSsl;
   }
 
-  @Before
-  public void setup() {
+  @BeforeClass
+  public static void setup() {
     try {
       testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
       testMiniKDC.start();
@@ -127,6 +127,7 @@ public class TestTimelineAuthenticationFilter {
           "localhost:8190");
       conf.set("hadoop.proxyuser.HTTP.hosts", "*");
       conf.set("hadoop.proxyuser.HTTP.users", FOO_USER);
+      conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES, 1);
 
       if (withSsl) {
         conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
@@ -146,14 +147,17 @@ public class TestTimelineAuthenticationFilter {
     } catch (Exception e) {
       assertTrue("Couldn't setup TimelineServer", false);
     }
+  }
 
-    client = TimelineClient.createTimelineClient();
+  private TimelineClient createTimelineClientForUGI() {
+    TimelineClient client = TimelineClient.createTimelineClient();
     client.init(conf);
     client.start();
+    return client;
   }
 
-  @After
-  public void tearDown() throws Exception {
+  @AfterClass
+  public static void tearDown() throws Exception {
     if (testMiniKDC != null) {
       testMiniKDC.stop();
     }
@@ -162,10 +166,6 @@ public class TestTimelineAuthenticationFilter {
       testTimelineServer.stop();
     }
 
-    if (client != null) {
-      client.stop();
-    }
-
     if (withSsl) {
       KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
       File base = new File(BASEDIR);
@@ -178,6 +178,7 @@ public class TestTimelineAuthenticationFilter {
     KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
       @Override
       public Void call() throws Exception {
+        TimelineClient client = createTimelineClientForUGI();
         TimelineEntity entityToStore = new TimelineEntity();
         entityToStore.setEntityType(
             TestTimelineAuthenticationFilter.class.getName());
@@ -199,6 +200,7 @@ public class TestTimelineAuthenticationFilter {
     KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
       @Override
       public Void call() throws Exception {
+        TimelineClient client = createTimelineClientForUGI();
         TimelineDomain domainToStore = new TimelineDomain();
         domainToStore.setId(TestTimelineAuthenticationFilter.class.getName());
         domainToStore.setReaders("*");
@@ -215,119 +217,96 @@ public class TestTimelineAuthenticationFilter {
 
   @Test
   public void testDelegationTokenOperations() throws Exception {
-    KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        // Let HTTP user to get the delegation for itself
-        Token<TimelineDelegationTokenIdentifier> token =
-            client.getDelegationToken(
-                UserGroupInformation.getCurrentUser().getShortUserName());
-        Assert.assertNotNull(token);
-        TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier();
-        Assert.assertNotNull(tDT);
-        Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner());
+    TimelineClient httpUserClient =
+      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<TimelineClient>() {
+        @Override
+        public TimelineClient call() throws Exception {
+          return createTimelineClientForUGI();
+        }
+      });
+    UserGroupInformation httpUser =
+      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<UserGroupInformation>() {
+        @Override
+        public UserGroupInformation call() throws Exception {
+          return UserGroupInformation.getCurrentUser();
+        }
+      });
+    // Let HTTP user to get the delegation for itself
+    Token<TimelineDelegationTokenIdentifier> token =
+      httpUserClient.getDelegationToken(httpUser.getShortUserName());
+    Assert.assertNotNull(token);
+    TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier();
+    Assert.assertNotNull(tDT);
+    Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner());
 
-        // Renew token
-        long renewTime1 = client.renewDelegationToken(token);
-        Thread.sleep(100);
-        long renewTime2 = client.renewDelegationToken(token);
-        Assert.assertTrue(renewTime1 < renewTime2);
+    // Renew token
+    long renewTime1 = httpUserClient.renewDelegationToken(token);
+    Thread.sleep(100);
+    long renewTime2 = httpUserClient.renewDelegationToken(token);
+    Assert.assertTrue(renewTime1 < renewTime2);
 
-        // Cancel token
-        client.cancelDelegationToken(token);
-        // Renew should not be successful because the token is canceled
-        try {
-          client.renewDelegationToken(token);
-          Assert.fail();
-        } catch (Exception e) {
-          Assert.assertTrue(e.getMessage().contains(
-              "Renewal request for unknown token"));
-        }
+    // Cancel token
+    httpUserClient.cancelDelegationToken(token);
+    // Renew should not be successful because the token is canceled
+    try {
+      httpUserClient.renewDelegationToken(token);
+      Assert.fail();
+    } catch (Exception e) {
+      Assert.assertTrue(e.getMessage().contains(
+            "Renewal request for unknown token"));
+    }
 
-        // Let HTTP user to get the delegation token for FOO user
-        UserGroupInformation fooUgi = UserGroupInformation.createProxyUser(
-            FOO_USER, UserGroupInformation.getCurrentUser());
-        token = fooUgi.doAs(
-            new PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>>() {
+    // Let HTTP user to get the delegation token for FOO user
+    UserGroupInformation fooUgi = UserGroupInformation.createProxyUser(
+        FOO_USER, httpUser);
+    TimelineClient fooUserClient = fooUgi.doAs(
+        new PrivilegedExceptionAction<TimelineClient>() {
           @Override
-          public Token<TimelineDelegationTokenIdentifier> run()
-              throws Exception {
-            return client.getDelegationToken(
-                UserGroupInformation.getCurrentUser().getShortUserName());
+          public TimelineClient run() throws Exception {
+            return createTimelineClientForUGI();
           }
         });
-        Assert.assertNotNull(token);
-        tDT = token.decodeIdentifier();
-        Assert.assertNotNull(tDT);
-        Assert.assertEquals(new Text(FOO_USER), tDT.getOwner());
-        Assert.assertEquals(new Text(HTTP_USER), tDT.getRealUser());
+    token = fooUserClient.getDelegationToken(httpUser.getShortUserName());
+    Assert.assertNotNull(token);
+    tDT = token.decodeIdentifier();
+    Assert.assertNotNull(tDT);
+    Assert.assertEquals(new Text(FOO_USER), tDT.getOwner());
+    Assert.assertEquals(new Text(HTTP_USER), tDT.getRealUser());
 
-        // Renew token
-        final Token<TimelineDelegationTokenIdentifier> tokenToRenew = token;
-        renewTime1 = fooUgi.doAs(
-            new PrivilegedExceptionAction<Long>() {
-          @Override
-          public Long run() throws Exception {
-            return client.renewDelegationToken(tokenToRenew);
-          }
-        });
-        renewTime2 = fooUgi.doAs(
-            new PrivilegedExceptionAction<Long>() {
-          @Override
-          public Long run() throws Exception {
-            return client.renewDelegationToken(tokenToRenew);
-          }
-        });
-        Assert.assertTrue(renewTime1 < renewTime2);
+    // Renew token as the renewer
+    final Token<TimelineDelegationTokenIdentifier> tokenToRenew = token;
+    renewTime1 = httpUserClient.renewDelegationToken(tokenToRenew);
+    renewTime2 = httpUserClient.renewDelegationToken(tokenToRenew);
+    Assert.assertTrue(renewTime1 < renewTime2);
 
-        // Cancel token
-        fooUgi.doAs(
-            new PrivilegedExceptionAction<Void>() {
-          @Override
-          public Void run() throws Exception {
-            client.cancelDelegationToken(tokenToRenew);
-            return null;
-          }
-        });
-        // Renew should not be successful because the token is canceled
-        try {
-          fooUgi.doAs(
-              new PrivilegedExceptionAction<Void>() {
-            @Override
-            public Void run() throws Exception {
-              client.renewDelegationToken(tokenToRenew);
-              return null;
-            }
-          });
-          Assert.fail();
-        } catch (Exception e) {
-          Assert.assertTrue(e.getMessage().contains(
-              "Renewal request for unknown token"));
-        }
+    // Cancel token
+    fooUserClient.cancelDelegationToken(tokenToRenew);
+
+    // Renew should not be successful because the token is canceled
+    try {
+      httpUserClient.renewDelegationToken(tokenToRenew);
+      Assert.fail();
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains("Renewal request for unknown token"));
+    }
 
-        // Let HTTP user to get the delegation token for BAR user
-        UserGroupInformation barUgi = UserGroupInformation.createProxyUser(
-            BAR_USER, UserGroupInformation.getCurrentUser());
-        token = barUgi.doAs(
-            new PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>>() {
+    // Let HTTP user to get the delegation token for BAR user
+    UserGroupInformation barUgi = UserGroupInformation.createProxyUser(
+        BAR_USER, httpUser);
+    TimelineClient barUserClient = barUgi.doAs(
+        new PrivilegedExceptionAction<TimelineClient>() {
           @Override
-          public Token<TimelineDelegationTokenIdentifier> run()
-              throws Exception {
-            try {
-              Token<TimelineDelegationTokenIdentifier> token =
-                  client.getDelegationToken(
-                      UserGroupInformation.getCurrentUser().getShortUserName());
-              Assert.fail();
-              return token;
-            } catch (Exception e) {
-              Assert.assertTrue(e instanceof AuthorizationException);
-              return null;
-            }
+          public TimelineClient run() {
+            return createTimelineClientForUGI();
           }
         });
-        return null;
-      }
-    });
-  }
 
+    try {
+      barUserClient.getDelegationToken(httpUser.getShortUserName());
+      Assert.fail();
+    } catch (Exception e) {
+      Assert.assertTrue(e.getCause() instanceof AuthorizationException || e.getCause() instanceof AuthenticationException);
+    }
+  }
 }
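
The reworked test above follows the same flow an application would use to obtain a timeline delegation token on behalf of another user: create a proxy UGI and build the TimelineClient inside doAs. A condensed sketch, assuming the process is already logged in (for example via keytab), proxy-user privileges are configured, and with "alice" as an illustrative proxied user:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.yarn.client.api.TimelineClient;
    import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;

    public class ProxyUserTimelineToken {
      public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();

        UserGroupInformation realUser = UserGroupInformation.getCurrentUser();
        UserGroupInformation proxyUgi =
            UserGroupInformation.createProxyUser("alice", realUser);

        // Build the client inside doAs so it binds to the proxy user's context,
        // then fetch a delegation token from the timeline server.
        Token<TimelineDelegationTokenIdentifier> token = proxyUgi.doAs(
            new PrivilegedExceptionAction<Token<TimelineDelegationTokenIdentifier>>() {
              @Override
              public Token<TimelineDelegationTokenIdentifier> run() throws Exception {
                TimelineClient client = TimelineClient.createTimelineClient();
                client.init(conf);
                client.start();
                try {
                  return client.getDelegationToken(
                      UserGroupInformation.getCurrentUser().getShortUserName());
                } finally {
                  client.stop();
                }
              }
            });
        System.out.println("Token owner: " + token.decodeIdentifier().getOwner());
      }
    }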


[15/49] hadoop git commit: HADOOP-11673. Skip using JUnit Assume in TestCodec. Contributed by Brahma Reddy Battula.

Posted by zj...@apache.org.
HADOOP-11673. Skip using JUnit Assume in TestCodec.
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/790aa679
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/790aa679
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/790aa679

Branch: refs/heads/YARN-2928
Commit: 790aa6790edbbe6cefc9f9b554169d67e47f4200
Parents: 6ee0d32
Author: Chris Douglas <cd...@apache.org>
Authored: Sun Mar 8 19:15:46 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Sun Mar 8 19:18:59 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/io/compress/TestCodec.java    | 16 ++++------------
 2 files changed, 7 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/790aa679/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 16002d5..0af0beb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -184,6 +184,9 @@ Trunk (Unreleased)
     HADOOP-11593. Convert site documentation from apt to markdown (stragglers)
     (Masatake Iwasaki via aw)
 
+    HADOOP-11673. Skip using JUnit Assume in TestCodec. (Brahma Reddy Battula
+    via cdouglas)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/790aa679/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
index 98b3934..7246bf5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
@@ -74,6 +74,7 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import static org.junit.Assert.*;
+import static org.junit.Assume.*;
 
 public class TestCodec {
 
@@ -364,10 +365,7 @@ public class TestCodec {
   public void testCodecPoolGzipReuse() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
-    if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-      LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
-      return;
-    }
+    assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
     GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
     DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
     Compressor c1 = CodecPool.getCompressor(gzc);
@@ -723,10 +721,7 @@ public class TestCodec {
   public void testNativeGzipConcat() throws IOException {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
-    if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-      LOG.warn("skipped: native libs not loaded");
-      return;
-    }
+    assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
     GzipConcatTest(conf, GzipCodec.GzipZlibDecompressor.class);
   }
 
@@ -840,10 +835,7 @@ public class TestCodec {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, useNative);
     if (useNative) {
-      if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-        LOG.warn("testGzipCodecWrite skipped: native libs not loaded");
-        return;
-      }
+      assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
     } else {
       assertFalse("ZlibFactory is using native libs against request",
           ZlibFactory.isNativeZlibLoaded(conf));
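
The pattern applied throughout TestCodec replaces a manual "log a warning and return" with JUnit's Assume, so the test is reported as skipped instead of silently passing when native zlib is unavailable. A small self-contained sketch of the idiom, where nativeLibraryLoaded() is a placeholder for a check such as ZlibFactory.isNativeZlibLoaded(conf):

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assume.assumeTrue;

    import org.junit.Test;

    public class AssumeExample {

      // Placeholder for an environment check like ZlibFactory.isNativeZlibLoaded(conf).
      private static boolean nativeLibraryLoaded() {
        return "true".equals(System.getProperty("native.lib.loaded"));
      }

      @Test
      public void testNeedsNativeLibs() {
        // If the precondition fails, JUnit marks the test as skipped rather
        // than letting an early return count as a pass.
        assumeTrue(nativeLibraryLoaded());
        assertTrue(doNativeWork());
      }

      private boolean doNativeWork() {
        return true; // illustrative stand-in for the real code under test
      }
    }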


[06/49] hadoop git commit: HADOOP-11653. shellprofiles should require .sh extension (Brahma Reddy Battula via aw)

Posted by zj...@apache.org.
HADOOP-11653. shellprofiles should require .sh extension (Brahma Reddy Battula via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01bfe6f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01bfe6f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01bfe6f0

Branch: refs/heads/YARN-2928
Commit: 01bfe6f05b5eadfe7c37806bc13eed047e0da300
Parents: d1abc5d
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Mar 6 13:54:11 2015 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Mar 6 13:54:11 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../src/main/bin/hadoop-functions.sh            |   4 +-
 .../src/main/conf/shellprofile.d/example        | 106 -------------------
 .../src/main/conf/shellprofile.d/example.sh     | 106 +++++++++++++++++++
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs    |  36 -------
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs.sh |  36 +++++++
 .../shellprofile.d/mapreduce                    |  41 -------
 .../shellprofile.d/mapreduce.sh                 |  41 +++++++
 .../hadoop-yarn/shellprofile.d/yarn             |  62 -----------
 .../hadoop-yarn/shellprofile.d/yarn.sh          |  62 +++++++++++
 10 files changed, 250 insertions(+), 247 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 65c6d85..628faa3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -414,6 +414,9 @@ Trunk (Unreleased)
 
     HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
 
+    HADOOP-11653. shellprofiles should require .sh extension
+    (Brahma Reddy Battula via aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index bccbe25..9488e3c 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -162,13 +162,13 @@ function hadoop_import_shellprofiles
   local files2
 
   if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
-    files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*)
+    files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
   else
     hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
   fi
 
   if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then
-    files2=(${HADOOP_CONF_DIR}/shellprofile.d/*)
+    files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
   fi
 
   for i in "${files1[@]}" "${files2[@]}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example b/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
deleted file mode 100644
index dc50821..0000000
--- a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
+++ /dev/null
@@ -1,106 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# This is an example shell profile.  It does not do anything other than
-# show an example of what the general structure and API of the pluggable
-# shell profile code looks like.
-#
-#
-
-#
-#  First, register the profile:
-#
-# hadoop_add_profile example
-#
-#
-# This profile name determines what the name of the functions will
-# be. The general pattern is _(profilename)_hadoop_(capability).  There
-# are currently four capabilities:
-#  * init
-#  * classpath
-#  * nativelib
-#  * finalize
-#
-# None of these functions are required.  Examples of all four follow...
-
-#
-# The _hadoop_init function is called near the very beginning of the
-# execution cycle. System and site-level shell env vars have been set,
-# command line processing finished, etc.  Note that the user's .hadooprc
-# has not yet been processed.  This is to allow them to override anything
-# that may be set here or potentially a dependency!
-#
-# function _example_hadoop_init
-# {
-#   # This example expects a home.  So set a default if not set.
-#   EXAMPLE_HOME="${EXAMPLE_HOME:-/usr/example}"
-# }
-#
-
-#
-# The _hadoop_classpath function is called when the shell code is
-# establishing the classpath.  This function should use the
-# shell hadoop_add_classpath function rather than directly
-# manipulating the CLASSPATH variable.  This ensures that the
-# CLASSPATH does not have duplicates and provides basic
-# sanity checks
-#
-# function _example_hadoop_classpath
-# {
-#   # jars that should be near the front
-#   hadoop_add_classpath "${EXAMPLE_HOME}/share/pre-jars/*" before
-#
-#   # jars that should be near the back
-#   hadoop_add_classpath "${EXAMPLE_HOME}/share/post-jars/*" after
-# }
-
-#
-# The _hadoop_nativelib function is called when the shell code is
-# buliding the locations for linkable shared libraries. Depending
-# upon needs, there are shell function calls that are useful
-# to use here:
-#
-# hadoop_add_javalibpath will push the path onto the command line
-# and into the java.library.path system property.  In the majority
-# of cases, this should be sufficient, especially if the shared
-# library has been linked correctly with $ORIGIN.
-#
-# hadoop_add_ldlibpath will push the path into the LD_LIBRARY_PATH
-# env var.  This should be unnecessary for most code.
-#
-# function _example_hadoop_nativelib
-# {
-#   # our library is standalone, so just need the basic path
-#   # added. Using after so we are later in the link list
-#   hadoop_add_javalibpath "${EXAMPLE_HOME}/lib" after
-# }
-
-#
-# The _hadoop_finalize function is called to finish up whatever
-# extra work needs to be done prior to exec'ing java or some other
-# binary. This is where command line properties should get added
-# and any last minute work.  This is called prior to Hadoop common
-# which means that one can override any parameters that Hadoop
-# would normally put here... so be careful!
-#
-# Useful functions here include hadoop_add_param and for
-# Windows compabitility, hadoop_translate_cygwin_path.
-#
-# function _example_hadoop_finalize
-# {
-#   # we need a property for our feature
-#   hadoop_add_param HADOOP_OPTS Dexample.feature "-Dexample.feature=awesome"
-# }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh b/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh
new file mode 100644
index 0000000..dc50821
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh
@@ -0,0 +1,106 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This is an example shell profile.  It does not do anything other than
+# show an example of what the general structure and API of the pluggable
+# shell profile code looks like.
+#
+#
+
+#
+#  First, register the profile:
+#
+# hadoop_add_profile example
+#
+#
+# This profile name determines what the name of the functions will
+# be. The general pattern is _(profilename)_hadoop_(capability).  There
+# are currently four capabilities:
+#  * init
+#  * classpath
+#  * nativelib
+#  * finalize
+#
+# None of these functions are required.  Examples of all four follow...
+
+#
+# The _hadoop_init function is called near the very beginning of the
+# execution cycle. System and site-level shell env vars have been set,
+# command line processing finished, etc.  Note that the user's .hadooprc
+# has not yet been processed.  This is to allow them to override anything
+# that may be set here or potentially a dependency!
+#
+# function _example_hadoop_init
+# {
+#   # This example expects a home.  So set a default if not set.
+#   EXAMPLE_HOME="${EXAMPLE_HOME:-/usr/example}"
+# }
+#
+
+#
+# The _hadoop_classpath function is called when the shell code is
+# establishing the classpath.  This function should use the
+# shell hadoop_add_classpath function rather than directly
+# manipulating the CLASSPATH variable.  This ensures that the
+# CLASSPATH does not have duplicates and provides basic
+# sanity checks
+#
+# function _example_hadoop_classpath
+# {
+#   # jars that should be near the front
+#   hadoop_add_classpath "${EXAMPLE_HOME}/share/pre-jars/*" before
+#
+#   # jars that should be near the back
+#   hadoop_add_classpath "${EXAMPLE_HOME}/share/post-jars/*" after
+# }
+
+#
+# The _hadoop_nativelib function is called when the shell code is
+# building the locations for linkable shared libraries. Depending
+# upon needs, there are shell function calls that are useful
+# to use here:
+#
+# hadoop_add_javalibpath will push the path onto the command line
+# and into the java.library.path system property.  In the majority
+# of cases, this should be sufficient, especially if the shared
+# library has been linked correctly with $ORIGIN.
+#
+# hadoop_add_ldlibpath will push the path into the LD_LIBRARY_PATH
+# env var.  This should be unnecessary for most code.
+#
+# function _example_hadoop_nativelib
+# {
+#   # our library is standalone, so just need the basic path
+#   # added. Using after so we are later in the link list
+#   hadoop_add_javalibpath "${EXAMPLE_HOME}/lib" after
+# }
+
+#
+# The _hadoop_finalize function is called to finish up whatever
+# extra work needs to be done prior to exec'ing java or some other
+# binary. This is where command line properties should get added
+# and any last minute work.  This is called prior to Hadoop common
+# which means that one can override any parameters that Hadoop
+# would normally put here... so be careful!
+#
+# Useful functions here include hadoop_add_param and for
+# Windows compatibility, hadoop_translate_cygwin_path.
+#
+# function _example_hadoop_finalize
+# {
+#   # we need a property for our feature
+#   hadoop_add_param HADOOP_OPTS Dexample.feature "-Dexample.feature=awesome"
+# }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs
deleted file mode 100644
index 5eb9e48..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs
+++ /dev/null
@@ -1,36 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hadoop_add_profile hdfs
-
-function _hdfs_hadoop_classpath
-{
-  #
-  # get all of the hdfs jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
-  fi
-
-  # put hdfs in classpath if present
-  if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh
new file mode 100644
index 0000000..5eb9e48
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh
@@ -0,0 +1,36 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop_add_profile hdfs
+
+function _hdfs_hadoop_classpath
+{
+  #
+  # get all of the hdfs jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
+  fi
+
+  # put hdfs in classpath if present
+  if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
+  fi
+
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-mapreduce-project/shellprofile.d/mapreduce
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/shellprofile.d/mapreduce b/hadoop-mapreduce-project/shellprofile.d/mapreduce
deleted file mode 100644
index 0b3dab1..0000000
--- a/hadoop-mapreduce-project/shellprofile.d/mapreduce
+++ /dev/null
@@ -1,41 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hadoop_add_profile mapred
-
-function _mapred_hadoop_classpath
-{
-  #
-  # get all of the mapreduce jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
-  fi
-
-  if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh b/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh
new file mode 100644
index 0000000..0b3dab1
--- /dev/null
+++ b/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh
@@ -0,0 +1,41 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop_add_profile mapred
+
+function _mapred_hadoop_classpath
+{
+  #
+  # get all of the mapreduce jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
+  fi
+
+  if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
+  fi
+
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn
deleted file mode 100644
index 4aa20b1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hadoop_add_profile yarn
-
-function _yarn_hadoop_classpath
-{
-  local i
-  #
-  # get all of the yarn jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
-             yarn-server/yarn-server-nodemanager \
-             yarn-server/yarn-server-common \
-             yarn-server/yarn-server-resourcemanager; do
-      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
-    done
-
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
-  fi
-
-  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
-}
-
-function _yarn_hadoop_finalize
-{
-  # Add YARN custom options to the command line in case someone actually
-  # used these.
-  #
-  # Note that we are replacing ' ' with '\ ' so that the values survive
-  # when we exec commands.
-  #
-  local yld=$HADOOP_LOG_DIR
-  hadoop_translate_cygwin_path yld
-  hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${yld}"
-  hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE}"
-  local yhd=$HADOOP_YARN_HOME
-  hadoop_translate_cygwin_path yhd
-  hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${yhd}"
-  hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bfe6f0/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
new file mode 100644
index 0000000..4aa20b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
@@ -0,0 +1,62 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop_add_profile yarn
+
+function _yarn_hadoop_classpath
+{
+  local i
+  #
+  # get all of the yarn jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
+             yarn-server/yarn-server-nodemanager \
+             yarn-server/yarn-server-common \
+             yarn-server/yarn-server-resourcemanager; do
+      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
+    done
+
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
+  fi
+
+  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
+  fi
+
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
+}
+
+function _yarn_hadoop_finalize
+{
+  # Add YARN custom options to the command line in case someone actually
+  # used these.
+  #
+  # Note that we are replacing ' ' with '\ ' so that the values survive
+  # when we exec commands.
+  #
+  local yld=$HADOOP_LOG_DIR
+  hadoop_translate_cygwin_path yld
+  hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${yld}"
+  hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE}"
+  local yhd=$HADOOP_YARN_HOME
+  hadoop_translate_cygwin_path yhd
+  hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${yhd}"
+  hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+}


[03/49] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

Posted by zj...@apache.org.
YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95bfd087
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95bfd087
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95bfd087

Branch: refs/heads/YARN-2928
Commit: 95bfd087dc89e57a93340604cc8b96042fa1a05a
Parents: 952640f
Author: Jian He <ji...@apache.org>
Authored: Thu Mar 5 21:14:41 2015 -0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Mar 5 21:20:09 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../dev-support/findbugs-exclude.xml            |   5 +-
 .../yarn/api/ApplicationBaseProtocol.java       | 355 +++++++++++++++++++
 .../yarn/api/ApplicationClientProtocol.java     | 290 +--------------
 .../yarn/api/ApplicationHistoryProtocol.java    | 303 +---------------
 .../apache/hadoop/yarn/webapp/ResponseInfo.java |   6 +-
 .../hadoop/yarn/webapp/YarnWebParams.java       |   4 +
 .../hadoop/yarn/webapp/view/HtmlBlock.java      |   2 +
 .../ApplicationHistoryClientService.java        | 185 +++++-----
 .../ApplicationHistoryManager.java              | 126 ++++++-
 .../ApplicationHistoryServer.java               |   2 +-
 .../webapp/AHSView.java                         |  28 +-
 .../webapp/AHSWebApp.java                       |  16 +-
 .../webapp/AHSWebServices.java                  |   6 +-
 .../webapp/AppAttemptPage.java                  |  15 +-
 .../webapp/AppPage.java                         |  21 +-
 .../TestApplicationHistoryClientService.java    |  12 +-
 .../webapp/TestAHSWebApp.java                   |  27 +-
 .../webapp/TestAHSWebServices.java              |  26 +-
 .../yarn/server/api/ApplicationContext.java     | 122 -------
 .../yarn/server/webapp/AppAttemptBlock.java     | 119 ++++---
 .../hadoop/yarn/server/webapp/AppBlock.java     | 274 ++++++++++++--
 .../hadoop/yarn/server/webapp/AppsBlock.java    |  53 ++-
 .../yarn/server/webapp/ContainerBlock.java      |  29 +-
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  86 +++++
 .../hadoop/yarn/server/webapp/WebServices.java  |  68 +++-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  11 +-
 .../resourcemanager/webapp/AppAttemptPage.java  |  55 +++
 .../server/resourcemanager/webapp/AppBlock.java | 344 ------------------
 .../server/resourcemanager/webapp/AppPage.java  |  25 +-
 .../resourcemanager/webapp/AppsBlock.java       | 132 -------
 .../webapp/AppsBlockWithMetrics.java            |   1 +
 .../webapp/CapacitySchedulerPage.java           |   1 +
 .../resourcemanager/webapp/ContainerPage.java   |  44 +++
 .../webapp/DefaultSchedulerPage.java            |   1 +
 .../webapp/FairSchedulerPage.java               |  21 +-
 .../server/resourcemanager/webapp/RMWebApp.java |   5 +
 .../resourcemanager/webapp/RmController.java    |   8 +
 .../server/resourcemanager/webapp/RmView.java   |  31 +-
 .../resourcemanager/webapp/TestAppPage.java     |   8 +-
 .../resourcemanager/webapp/TestRMWebApp.java    |  48 ++-
 .../webapp/TestRMWebAppFairScheduler.java       |  14 +-
 42 files changed, 1376 insertions(+), 1556 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dcf328f..accde78 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -360,6 +360,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3122. Metrics for container's actual CPU usage. 
     (Anubhav Dhoot via kasha)
 
+    YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
+    Xuan Gong via jianhe)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 1c3f201..a89884a 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -63,9 +63,12 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
   <Match>
-    <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmapp\.attempt\.RMAppAttemptMetrics" />
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="getLocalityStatistics" />
     <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
new file mode 100644
index 0000000..2a8a283
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * <p>
+ * The protocol between clients and the <code>ResourceManager</code> or
+ * <code>ApplicationHistoryServer</code> to get information on applications,
+ * application attempts and containers.
+ * </p>
+ *
+ */
+@Private
+@Unstable
+public interface ApplicationBaseProtocol {
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of an Application from the
+   * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetApplicationReportRequest} provides the
+   * {@link ApplicationId} of the application.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the application,
+   * queue etc. before accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationReportResponse} which includes the
+   * {@link ApplicationReport} for the application.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access then the following
+   * fields in the report will be set to stubbed values:
+   * <ul>
+   * <li>host - set to "N/A"</li>
+   * <li>RPC port - set to -1</li>
+   * <li>client token - set to "N/A"</li>
+   * <li>diagnostics - set to "N/A"</li>
+   * <li>tracking URL - set to "N/A"</li>
+   * <li>original tracking URL - set to "N/A"</li>
+   * <li>resource usage report - all values are -1</li>
+   * </ul>
+   * </p>
+   *
+   * @param request
+   *          request for an application report
+   * @return application report
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Stable
+  @Idempotent
+  public GetApplicationReportResponse getApplicationReport(
+      GetApplicationReportRequest request) throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of Applications matching the
+   * filters defined by {@link GetApplicationsRequest} in the cluster from the
+   * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationsResponse} which includes the
+   * {@link ApplicationReport} for the applications.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(GetApplicationReportRequest)}.
+   * </p>
+   *
+   * @param request
+   *          request for report on applications
+   * @return report on applications matching the given application types defined
+   *         in the request
+   * @throws YarnException
+   * @throws IOException
+   * @see GetApplicationsRequest
+   */
+  @Public
+  @Stable
+  @Idempotent
+  public GetApplicationsResponse
+      getApplications(GetApplicationsRequest request) throws YarnException,
+          IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of an Application Attempt
+   * from the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetApplicationAttemptReportRequest} provides the
+   * {@link ApplicationAttemptId} of the application attempt.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the method before
+   * accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationAttemptReportResponse} which includes
+   * the {@link ApplicationAttemptReport} for the application attempt.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access then the following
+   * fields in the report will be set to stubbed values:
+   * <ul>
+   * <li>host</li>
+   * <li>RPC port</li>
+   * <li>client token</li>
+   * <li>diagnostics - set to "N/A"</li>
+   * <li>tracking URL</li>
+   * </ul>
+   * </p>
+   *
+   * @param request
+   *          request for an application attempt report
+   * @return application attempt report
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
+      GetApplicationAttemptReportRequest request) throws YarnException,
+      IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of all Application attempts
+   * in the cluster from the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationAttemptsResponse} which includes the
+   * {@link ApplicationAttemptReport} for all the application attempts of a
+   * specified application.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
+   * </p>
+   *
+   * @param request
+   *          request for reports on all application attempts of an application
+   * @return reports on all application attempts of an application
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetApplicationAttemptsResponse getApplicationAttempts(
+      GetApplicationAttemptsRequest request) throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of a Container from the
+   * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetContainerReportRequest} provides the
+   * {@link ContainerId} of the container.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the method before
+   * accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetContainerReportResponse} which includes the
+   * {@link ContainerReport} for the container.
+   * </p>
+   *
+   * @param request
+   *          request for a container report
+   * @return container report
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetContainerReportResponse getContainerReport(
+      GetContainerReportRequest request) throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of Containers for an
+   * application attempt from the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetContainersRequest} provides the
+   * {@link ApplicationAttemptId} of the application attempt.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the method before
+   * accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetContainersResponse} which includes a list of
+   * {@link ContainerReport} for all the containers of a specific application
+   * attempt.
+   * </p>
+   *
+   * @param request
+   *          request for a list of container reports of an application attempt.
+   * @return reports on all containers of an application attempt
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetContainersResponse getContainers(GetContainersRequest request)
+      throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a delegation token, enabling the
+   * containers to talk to the service using those tokens.
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with the delegation {@link Token} that can be used by the client
+   * to speak to this service.
+   *
+   * @param request
+   *          request to get a delegation token for the client.
+   * @return delegation token that can be used to talk to this service
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Stable
+  @Idempotent
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) throws YarnException, IOException;
+
+  /**
+   * Renew an existing delegation {@link Token}.
+   *
+   * @param request
+   *          the delegation token to be renewed.
+   * @return the new expiry time for the delegation token.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Private
+  @Unstable
+  @Idempotent
+  public RenewDelegationTokenResponse renewDelegationToken(
+      RenewDelegationTokenRequest request) throws YarnException, IOException;
+
+  /**
+   * Cancel an existing delegation {@link Token}.
+   *
+   * @param request
+   *          the delegation token to be cancelled.
+   * @return an empty response.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Private
+  @Unstable
+  @Idempotent
+  public CancelDelegationTokenResponse cancelDelegationToken(
+      CancelDelegationTokenRequest request) throws YarnException, IOException;
+
+}

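For readers following the refactor, here is a minimal sketch of how a client exercises the report-fetching calls that this commit pulls up into ApplicationBaseProtocol. It uses the high-level YarnClient facade (which speaks ApplicationClientProtocol to the ResourceManager) rather than the raw RPC interface; the class name and the application id values are illustrative placeholders, not part of this change.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class AppReportSketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    // YarnClient talks ApplicationClientProtocol to the ResourceManager;
    // getApplicationReport() is one of the methods moved into
    // ApplicationBaseProtocol by this commit.
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    try {
      // Illustrative application id; substitute a real cluster timestamp/id.
      ApplicationId appId = ApplicationId.newInstance(1425600000000L, 1);
      ApplicationReport report = client.getApplicationReport(appId);
      // Without VIEW_APP access, fields such as host and tracking URL come
      // back stubbed ("N/A", -1) as described in the javadoc above.
      System.out.println(report.getName() + " -> "
          + report.getYarnApplicationState());
    } finally {
      client.stop();
    }
  }
}
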
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
index b5f5cc0..0a7d415 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
@@ -20,33 +20,17 @@ package org.apache.hadoop.yarn.api;
 
 import java.io.IOException;
 
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.io.retry.Idempotent;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
@@ -61,8 +45,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
@@ -71,19 +53,13 @@ import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
@@ -96,7 +72,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
  */
 @Public
 @Stable
-public interface ApplicationClientProtocol {
+public interface ApplicationClientProtocol extends ApplicationBaseProtocol {
   /**
    * <p>The interface used by clients to obtain a new {@link ApplicationId} for 
    * submitting new applications.</p>
@@ -199,44 +175,6 @@ public interface ApplicationClientProtocol {
   throws YarnException, IOException;
 
   /**
-   * <p>The interface used by clients to get a report of an Application from
-   * the <code>ResourceManager</code>.</p>
-   * 
-   * <p>The client, via {@link GetApplicationReportRequest} provides the
-   * {@link ApplicationId} of the application.</p>
-   *
-   * <p> In secure mode,the <code>ResourceManager</code> verifies access to the
-   * application, queue etc. before accepting the request.</p> 
-   * 
-   * <p>The <code>ResourceManager</code> responds with a 
-   * {@link GetApplicationReportResponse} which includes the 
-   * {@link ApplicationReport} for the application.</p>
-   * 
-   * <p>If the user does not have <code>VIEW_APP</code> access then the
-   * following fields in the report will be set to stubbed values:
-   * <ul>
-   *   <li>host - set to "N/A"</li>
-   *   <li>RPC port - set to -1</li>
-   *   <li>client token - set to "N/A"</li>
-   *   <li>diagnostics - set to "N/A"</li>
-   *   <li>tracking URL - set to "N/A"</li>
-   *   <li>original tracking URL - set to "N/A"</li>
-   *   <li>resource usage report - all values are -1</li>
-   * </ul></p>
-   *
-   * @param request request for an application report
-   * @return application report 
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Stable
-  @Idempotent
-  public GetApplicationReportResponse getApplicationReport(
-      GetApplicationReportRequest request) 
-  throws YarnException, IOException;
-  
-  /**
    * <p>The interface used by clients to get metrics about the cluster from
    * the <code>ResourceManager</code>.</p>
    * 
@@ -256,35 +194,7 @@ public interface ApplicationClientProtocol {
   public GetClusterMetricsResponse getClusterMetrics(
       GetClusterMetricsRequest request) 
   throws YarnException, IOException;
-  
-  /**
-   * <p>The interface used by clients to get a report of Applications
-   * matching the filters defined by {@link GetApplicationsRequest}
-   * in the cluster from the <code>ResourceManager</code>.</p>
-   * 
-   * <p>The <code>ResourceManager</code> responds with a 
-   * {@link GetApplicationsResponse} which includes the
-   * {@link ApplicationReport} for the applications.</p>
-   * 
-   * <p>If the user does not have <code>VIEW_APP</code> access for an
-   * application then the corresponding report will be filtered as
-   * described in {@link #getApplicationReport(GetApplicationReportRequest)}.
-   * </p>
-   *
-   * @param request request for report on applications
-   * @return report on applications matching the given application types
-   *           defined in the request
-   * @throws YarnException
-   * @throws IOException
-   * @see GetApplicationsRequest
-   */
-  @Public
-  @Stable
-  @Idempotent
-  public GetApplicationsResponse getApplications(
-      GetApplicationsRequest request)
-  throws YarnException, IOException;
-  
+
   /**
    * <p>The interface used by clients to get a report of all nodes
    * in the cluster from the <code>ResourceManager</code>.</p>
@@ -346,57 +256,8 @@ public interface ApplicationClientProtocol {
   public GetQueueUserAclsInfoResponse getQueueUserAcls(
       GetQueueUserAclsInfoRequest request) 
   throws YarnException, IOException;
-  
-  /**
-   * <p>The interface used by clients to get delegation token, enabling the 
-   * containers to be able to talk to the service using those tokens.
-   * 
-   *  <p> The <code>ResourceManager</code> responds with the delegation
-   *  {@link Token} that can be used by the client to speak to this
-   *  service.
-   * @param request request to get a delegation token for the client.
-   * @return delegation token that can be used to talk to this service
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Stable
-  @Idempotent
-  public GetDelegationTokenResponse getDelegationToken(
-      GetDelegationTokenRequest request) 
-  throws YarnException, IOException;
-  
-  /**
-   * Renew an existing delegation {@link Token}.
-   * 
-   * @param request the delegation token to be renewed.
-   * @return the new expiry time for the delegation token.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  @Idempotent
-  public RenewDelegationTokenResponse renewDelegationToken(
-      RenewDelegationTokenRequest request) throws YarnException,
-      IOException;
 
   /**
-   * Cancel an existing delegation {@link Token}.
-   * 
-   * @param request the delegation token to be cancelled.
-   * @return an empty response.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  @Idempotent
-  public CancelDelegationTokenResponse cancelDelegationToken(
-      CancelDelegationTokenRequest request) throws YarnException,
-      IOException;
-  
-  /**
    * Move an application to a new queue.
    * 
    * @param request the application ID and the target queue
@@ -412,153 +273,6 @@ public interface ApplicationClientProtocol {
 
   /**
    * <p>
-   * The interface used by clients to get a report of an Application Attempt
-   * from the <code>ResourceManager</code> 
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetApplicationAttemptReportRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ResourceManager</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetApplicationAttemptReportResponse} which includes the
-   * {@link ApplicationAttemptReport} for the application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access then the following
-   * fields in the report will be set to stubbed values:
-   * <ul>
-   * <li>host</li>
-   * <li>RPC port</li>
-   * <li>client token</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL</li>
-   * </ul>
-   * </p>
-   * 
-   * @param request
-   *          request for an application attempt report
-   * @return application attempt report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
-      GetApplicationAttemptReportRequest request) throws YarnException,
-      IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of all Application attempts
-   * in the cluster from the <code>ResourceManager</code>
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetApplicationAttemptsRequest} which includes the
-   * {@link ApplicationAttemptReport} for all the applications attempts of a
-   * specified application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access for an application
-   * then the corresponding report will be filtered as described in
-   * {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
-   * </p>
-   * 
-   * @param request
-   *          request for reports on all application attempts of an application
-   * @return reports on all application attempts of an application
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetApplicationAttemptsResponse getApplicationAttempts(
-      GetApplicationAttemptsRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Container from the
-   * <code>ResourceManager</code>
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainerReportRequest} provides the
-   * {@link ContainerId} of the container.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ResourceManager</code> verifies access to the
-   * method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetContainerReportResponse} which includes the
-   * {@link ContainerReport} for the container.
-   * </p>
-   * 
-   * @param request
-   *          request for a container report
-   * @return container report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetContainerReportResponse getContainerReport(
-      GetContainerReportRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of Containers for an
-   * application attempt from the <code>ResourceManager</code>
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainersRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ResourceManager</code> verifies access to the
-   * method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetContainersResponse} which includes a list of
-   * {@link ContainerReport} for all the containers of a specific application
-   * attempt.
-   * </p>
-   * 
-   * @param request
-   *          request for a list of container reports of an application attempt.
-   * @return reports on all containers of an application attempt
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetContainersResponse getContainers(GetContainersRequest request)
-      throws YarnException, IOException;
-
-  /**
-   * <p>
    * The interface used by clients to submit a new reservation to the
    * {@code ResourceManager}.
    * </p>

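Because the report-related methods now live on ApplicationBaseProtocol, helper code can be written once against the base interface and reused with either an ApplicationClientProtocol (ResourceManager) or ApplicationHistoryProtocol (history server) proxy. A hedged sketch, assuming the caller already holds such a proxy; the ReportDumper class is a hypothetical helper, not part of this patch.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.exceptions.YarnException;

public final class ReportDumper {
  private ReportDumper() {
  }

  // Works against the RM or the ApplicationHistoryServer, since both
  // protocols now extend ApplicationBaseProtocol.
  public static void dumpApplications(ApplicationBaseProtocol protocol)
      throws YarnException, IOException {
    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    List<ApplicationReport> reports =
        protocol.getApplications(request).getApplicationList();
    for (ApplicationReport report : reports) {
      System.out.println(report.getApplicationId() + "\t"
          + report.getYarnApplicationState() + "\t" + report.getQueue());
    }
  }
}
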
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
index 0bfd2ed..fc8e885 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
@@ -18,37 +18,8 @@
 
 package org.apache.hadoop.yarn.api;
 
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.api.records.Token;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 
 /**
  * <p>
@@ -58,277 +29,5 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
  */
 @Public
 @Unstable
-public interface ApplicationHistoryProtocol {
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Application from the
-   * <code>ResourceManager</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetApplicationReportRequest} provides the
-   * {@link ApplicationId} of the application.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the application, queue etc. before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationReportResponse} which includes the
-   * {@link ApplicationReport} for the application.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access then the following
-   * fields in the report will be set to stubbed values:
-   * <ul>
-   * <li>host - set to "N/A"</li>
-   * <li>RPC port - set to -1</li>
-   * <li>client token - set to "N/A"</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL - set to "N/A"</li>
-   * <li>original tracking URL - set to "N/A"</li>
-   * <li>resource usage report - all values are -1</li>
-   * </ul>
-   * </p>
-   * 
-   * @param request
-   *          request for an application report
-   * @return application report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationReportResponse getApplicationReport(
-      GetApplicationReportRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of all Applications in the
-   * cluster from the <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationsResponse} which includes a list of
-   * {@link ApplicationReport} for all the applications.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access for an application
-   * then the corresponding report will be filtered as described in
-   * {@link #getApplicationReport(GetApplicationReportRequest)}.
-   * </p>
-   * 
-   * @param request
-   *          request for reports on all the applications
-   * @return report on applications matching the given application types defined
-   *         in the request
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationsResponse
-      getApplications(GetApplicationsRequest request) throws YarnException,
-          IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Application Attempt
-   * from the <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetApplicationAttemptReportRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationAttemptReportResponse} which includes the
-   * {@link ApplicationAttemptReport} for the application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access then the following
-   * fields in the report will be set to stubbed values:
-   * <ul>
-   * <li>host</li>
-   * <li>RPC port</li>
-   * <li>client token</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL</li>
-   * </ul>
-   * </p>
-   * 
-   * @param request
-   *          request for an application attempt report
-   * @return application attempt report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
-      GetApplicationAttemptReportRequest request) throws YarnException,
-      IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of all Application attempts
-   * in the cluster from the <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationAttemptsRequest} which includes the
-   * {@link ApplicationAttemptReport} for all the applications attempts of a
-   * specified application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access for an application
-   * then the corresponding report will be filtered as described in
-   * {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
-   * </p>
-   * 
-   * @param request
-   *          request for reports on all application attempts of an application
-   * @return reports on all application attempts of an application
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationAttemptsResponse getApplicationAttempts(
-      GetApplicationAttemptsRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Container from the
-   * <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainerReportRequest} provides the
-   * {@link ContainerId} of the container.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetContainerReportResponse} which includes the
-   * {@link ContainerReport} for the container.
-   * </p>
-   * 
-   * @param request
-   *          request for a container report
-   * @return container report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetContainerReportResponse getContainerReport(
-      GetContainerReportRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of Containers for an
-   * application attempt from the <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainersRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetContainersResponse} which includes a list of
-   * {@link ContainerReport} for all the containers of a specific application
-   * attempt.
-   * </p>
-   * 
-   * @param request
-   *          request for a list of container reports of an application attempt.
-   * @return reports on all containers of an application attempt
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetContainersResponse getContainers(GetContainersRequest request)
-      throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get delegation token, enabling the
-   * containers to be able to talk to the service using those tokens.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with the delegation
-   * token {@link Token} that can be used by the client to speak to this
-   * service.
-   * </p>
-   * 
-   * @param request
-   *          request to get a delegation token for the client.
-   * @return delegation token that can be used to talk to this service
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetDelegationTokenResponse getDelegationToken(
-      GetDelegationTokenRequest request) throws YarnException, IOException;
-
-  /**
-   * Renew an existing delegation token.
-   * 
-   * @param request
-   *          the delegation token to be renewed.
-   * @return the new expiry time for the delegation token.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  public RenewDelegationTokenResponse renewDelegationToken(
-      RenewDelegationTokenRequest request) throws YarnException, IOException;
-
-  /**
-   * Cancel an existing delegation token.
-   * 
-   * @param request
-   *          the delegation token to be cancelled.
-   * @return an empty response.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  public CancelDelegationTokenResponse cancelDelegationToken(
-      CancelDelegationTokenRequest request) throws YarnException, IOException;
+public interface ApplicationHistoryProtocol extends ApplicationBaseProtocol {
 }

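With ApplicationHistoryProtocol reduced to a marker interface over ApplicationBaseProtocol, the attempt- and container-level reports it exposes are reachable through the same client-side calls. A rough sketch using YarnClient follows, assuming the generic application history / timeline service is configured so that reports for finished applications can still be served; the class name and application id are illustrative only.

import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class AttemptWalker {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // Illustrative id; replace with a real application id.
      ApplicationId appId = ApplicationId.newInstance(1425600000000L, 1);
      // Walk every attempt of the application, then every container of
      // each attempt, using the base-protocol report calls.
      List<ApplicationAttemptReport> attempts =
          client.getApplicationAttempts(appId);
      for (ApplicationAttemptReport attempt : attempts) {
        System.out.println(attempt.getApplicationAttemptId());
        List<ContainerReport> containers =
            client.getContainers(attempt.getApplicationAttemptId());
        for (ContainerReport container : containers) {
          System.out.println("  " + container.getContainerId() + " "
              + container.getContainerState());
        }
      }
    } finally {
      client.stop();
    }
  }
}
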
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
index 7e836b5..b04bc5d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
@@ -81,7 +81,11 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
   }
 
   public ResponseInfo _(String key, String url, Object anchor) {
-    items.add(Item.of(key, url, anchor));
+    if (url == null) {
+      items.add(Item.of(key, anchor, false));
+    } else {
+      items.add(Item.of(key, url, anchor));
+    }
     return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
index 62c3c7a..1200690 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
@@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public interface YarnWebParams {
+  static final String RM_WEB_UI = "ResourceManager";
+  static final String APP_HISTORY_WEB_UI = "ApplicationHistoryServer";
+  
   String NM_NODENAME = "nm.id";
   String APPLICATION_ID = "app.id";
   String APPLICATION_ATTEMPT_ID = "appattempt.id";
@@ -33,4 +36,5 @@ public interface YarnWebParams {
   String QUEUE_NAME = "queue.name";
   String NODE_STATE = "node.state";
   String NODE_LABEL = "node.label";
+  String WEB_UI_TYPE = "web.ui.type";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
index 6ee0d1c..a785c0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public abstract class HtmlBlock extends TextView implements SubView {
 
+  protected static final String UNAVAILABLE = "N/A";
+
   public class Block extends Hamlet {
     Block(PrintWriter out, int level, boolean wasInline) {
       super(out, level, wasInline);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
index 8da1ea1..e64ca14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -56,27 +56,23 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
-import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
-import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
 
 import com.google.common.base.Preconditions;
 
-public class ApplicationHistoryClientService extends AbstractService {
+public class ApplicationHistoryClientService extends AbstractService implements
+    ApplicationHistoryProtocol {
   private static final Log LOG = LogFactory
     .getLog(ApplicationHistoryClientService.class);
   private ApplicationHistoryManager history;
-  private ApplicationHistoryProtocol protocolHandler;
   private Server server;
   private InetSocketAddress bindAddress;
 
   public ApplicationHistoryClientService(ApplicationHistoryManager history) {
     super("ApplicationHistoryClientService");
     this.history = history;
-    this.protocolHandler = new ApplicationHSClientProtocolHandler();
   }
 
   protected void serviceStart() throws Exception {
@@ -95,7 +91,7 @@ public class ApplicationHistoryClientService extends AbstractService {
         YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT);
 
     server =
-        rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler,
+        rpc.getServer(ApplicationHistoryProtocol.class, this,
           address, conf, null, conf.getInt(
             YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
             YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT));
@@ -127,11 +123,6 @@ public class ApplicationHistoryClientService extends AbstractService {
   }
 
   @Private
-  public ApplicationHistoryProtocol getClientHandler() {
-    return this.protocolHandler;
-  }
-
-  @Private
   public InetSocketAddress getBindAddress() {
     return this.bindAddress;
   }
@@ -141,109 +132,97 @@ public class ApplicationHistoryClientService extends AbstractService {
     this.server.refreshServiceAcl(configuration, policyProvider);
   }
 
-  private class ApplicationHSClientProtocolHandler implements
-      ApplicationHistoryProtocol {
-
-    @Override
-    public CancelDelegationTokenResponse cancelDelegationToken(
-        CancelDelegationTokenRequest request) throws YarnException, IOException {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
-    public GetApplicationAttemptReportResponse getApplicationAttemptReport(
-        GetApplicationAttemptReportRequest request) throws YarnException,
-        IOException {
-      ApplicationAttemptId appAttemptId = request.getApplicationAttemptId();
-      try {
-        GetApplicationAttemptReportResponse response =
-            GetApplicationAttemptReportResponse.newInstance(history
-              .getApplicationAttempt(appAttemptId));
-        return response;
-      } catch (IOException e) {
-        String msg = "ApplicationAttempt with id '" + appAttemptId +
-            "' doesn't exist in the history store.";
-        LOG.error(msg, e);
-        throw new ApplicationAttemptNotFoundException(msg);
-      }
-    }
+  @Override
+  public CancelDelegationTokenResponse cancelDelegationToken(
+      CancelDelegationTokenRequest request) throws YarnException, IOException {
+    // TODO Auto-generated method stub
+    return null;
+  }
 
-    @Override
-    public GetApplicationAttemptsResponse getApplicationAttempts(
-        GetApplicationAttemptsRequest request) throws YarnException,
-        IOException {
-      GetApplicationAttemptsResponse response =
-          GetApplicationAttemptsResponse
-            .newInstance(new ArrayList<ApplicationAttemptReport>(history
-              .getApplicationAttempts(request.getApplicationId()).values()));
+  @Override
+  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
+      GetApplicationAttemptReportRequest request) throws YarnException,
+      IOException {
+    ApplicationAttemptId appAttemptId = request.getApplicationAttemptId();
+    try {
+      GetApplicationAttemptReportResponse response =
+          GetApplicationAttemptReportResponse.newInstance(history
+            .getApplicationAttempt(appAttemptId));
       return response;
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
+      throw e;
     }
+  }
 
-    @Override
-    public GetApplicationReportResponse getApplicationReport(
-        GetApplicationReportRequest request) throws YarnException, IOException {
-      ApplicationId applicationId = request.getApplicationId();
-      try {
-        GetApplicationReportResponse response =
-            GetApplicationReportResponse.newInstance(history
-              .getApplication(applicationId));
-        return response;
-      } catch (IOException e) {
-        String msg = "Application with id '" + applicationId +
-            "' doesn't exist in the history store.";
-        LOG.error(msg, e);
-        throw new ApplicationNotFoundException(msg);
-      }
-    }
+  @Override
+  public GetApplicationAttemptsResponse getApplicationAttempts(
+      GetApplicationAttemptsRequest request) throws YarnException, IOException {
+    GetApplicationAttemptsResponse response =
+        GetApplicationAttemptsResponse
+          .newInstance(new ArrayList<ApplicationAttemptReport>(history
+            .getApplicationAttempts(request.getApplicationId()).values()));
+    return response;
+  }
 
-    @Override
-    public GetApplicationsResponse getApplications(
-        GetApplicationsRequest request) throws YarnException, IOException {
-      GetApplicationsResponse response =
-          GetApplicationsResponse.newInstance(new ArrayList<ApplicationReport>(
-            history.getAllApplications().values()));
+  @Override
+  public GetApplicationReportResponse getApplicationReport(
+      GetApplicationReportRequest request) throws YarnException, IOException {
+    ApplicationId applicationId = request.getApplicationId();
+    try {
+      GetApplicationReportResponse response =
+          GetApplicationReportResponse.newInstance(history
+            .getApplication(applicationId));
       return response;
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
+      throw e;
     }
+  }
 
-    @Override
-    public GetContainerReportResponse getContainerReport(
-        GetContainerReportRequest request) throws YarnException, IOException {
-      ContainerId containerId = request.getContainerId();
-      try {
-        GetContainerReportResponse response =
-            GetContainerReportResponse.newInstance(
-                history.getContainer(containerId));
-        return response;
-      } catch (IOException e) {
-        String msg = "Container with id '" + containerId +
-            "' doesn't exist in the history store.";
-        LOG.error(msg, e);
-        throw new ContainerNotFoundException(msg);
-      }
-    }
+  @Override
+  public GetApplicationsResponse
+      getApplications(GetApplicationsRequest request) throws YarnException,
+          IOException {
+    GetApplicationsResponse response =
+        GetApplicationsResponse.newInstance(new ArrayList<ApplicationReport>(
+          history.getAllApplications().values()));
+    return response;
+  }
 
-    @Override
-    public GetContainersResponse getContainers(GetContainersRequest request)
-        throws YarnException, IOException {
-      GetContainersResponse response =
-          GetContainersResponse.newInstance(new ArrayList<ContainerReport>(
-            history.getContainers(request.getApplicationAttemptId()).values()));
+  @Override
+  public GetContainerReportResponse getContainerReport(
+      GetContainerReportRequest request) throws YarnException, IOException {
+    ContainerId containerId = request.getContainerId();
+    try {
+      GetContainerReportResponse response =
+          GetContainerReportResponse.newInstance(history
+            .getContainer(containerId));
       return response;
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
+      throw e;
     }
+  }
 
-    @Override
-    public GetDelegationTokenResponse getDelegationToken(
-        GetDelegationTokenRequest request) throws YarnException, IOException {
-      // TODO Auto-generated method stub
-      return null;
-    }
+  @Override
+  public GetContainersResponse getContainers(GetContainersRequest request)
+      throws YarnException, IOException {
+    GetContainersResponse response =
+        GetContainersResponse.newInstance(new ArrayList<ContainerReport>(
+          history.getContainers(request.getApplicationAttemptId()).values()));
+    return response;
+  }
 
-    @Override
-    public RenewDelegationTokenResponse renewDelegationToken(
-        RenewDelegationTokenRequest request) throws YarnException, IOException {
-      // TODO Auto-generated method stub
-      return null;
-    }
+  @Override
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) throws YarnException, IOException {
+    return null;
+  }
+
+  @Override
+  public RenewDelegationTokenResponse renewDelegationToken(
+      RenewDelegationTokenRequest request) throws YarnException, IOException {
+    return null;
   }
 }

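With the inner ApplicationHSClientProtocolHandler removed, the service itself implements ApplicationHistoryProtocol, so callers invoke the protocol methods directly (the test changes further down do exactly this). A minimal sketch, assuming a started clientService and an existing appId:

  GetApplicationReportRequest request =
      GetApplicationReportRequest.newInstance(appId);
  ApplicationReport report =
      clientService.getApplicationReport(request).getApplicationReport();
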
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
index db25d29..041c31b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
@@ -18,11 +18,125 @@
 
 package org.apache.hadoop.yarn.server.applicationhistoryservice;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+@Private
+@Unstable
+public interface ApplicationHistoryManager {
+  /**
+   * This method returns the {@link ApplicationReport} for the specified
+   * {@link ApplicationId}.
+   * 
+   * @param appId
+   * 
+   * @return {@link ApplicationReport} for the ApplicationId.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ApplicationReport getApplication(ApplicationId appId) throws YarnException,
+      IOException;
+
+  /**
+   * This method returns the {@link ApplicationReport}s of all applications.
+   * 
+   * @return map of {@link ApplicationId} to {@link ApplicationReport}s.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  Map<ApplicationId, ApplicationReport> getAllApplications()
+      throws YarnException, IOException;
+
+  /**
+   * An application can have multiple application attempts, each described by
+   * an {@link ApplicationAttemptReport}. This method returns all
+   * {@link ApplicationAttemptReport}s for the application.
+   * 
+   * @param appId
+   * 
+   * @return all {@link ApplicationAttemptReport}s for the Application.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  Map<ApplicationAttemptId, ApplicationAttemptReport> getApplicationAttempts(
+      ApplicationId appId) throws YarnException, IOException;
+
+  /**
+   * This method returns the {@link ApplicationAttemptReport} for the specified
+   * {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return {@link ApplicationAttemptReport} for ApplicationAttemptId
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ApplicationAttemptReport getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
+
+  /**
+   * This method returns the {@link ContainerReport} for the specified
+   * {@link ContainerId}.
+   * 
+   * @param containerId
+   *          {@link ContainerId}
+   * @return {@link ContainerReport} for ContainerId
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ContainerReport getContainer(ContainerId containerId) throws YarnException,
+      IOException;
+
+  /**
+   * This method returns the AM {@link ContainerReport} for the specified
+   * {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return {@link ContainerReport} for ApplicationAttemptId
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
+      throws YarnException, IOException;
+
+  /**
+   * This method returns a Map of {@link ContainerId} to {@link ContainerReport}
+   * for the specified {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return Map of {@link ContainerId} to {@link ContainerReport} for
+   *         ApplicationAttemptId
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  Map<ContainerId, ContainerReport> getContainers(
+      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
 
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public interface ApplicationHistoryManager extends ApplicationContext {
 }

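The interface now spells out the read-only history API instead of inheriting it from ApplicationContext. A short usage sketch against the new methods; historyManager and appId are assumed to exist and exception handling is omitted:

  ApplicationReport app = historyManager.getApplication(appId);
  Map<ApplicationAttemptId, ApplicationAttemptReport> attempts =
      historyManager.getApplicationAttempts(appId);
  for (ApplicationAttemptId attemptId : attempts.keySet()) {
    ContainerReport amContainer = historyManager.getAMContainer(attemptId);
    Map<ContainerId, ContainerReport> containers =
        historyManager.getContainers(attemptId);
  }
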
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 0bafd36..88cd153 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -272,7 +272,7 @@ public class ApplicationHistoryServer extends CompositeService {
             .$for("applicationhistory", ApplicationHistoryClientService.class,
                 ahsClientService, "ws")
             .with(conf).at(bindAddress).start(
-                new AHSWebApp(timelineDataManager, historyManager));
+                new AHSWebApp(timelineDataManager, ahsClientService));
     } catch (Exception e) {
       String msg = "AHSWebApp failed to start.";
       LOG.error(msg, e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
index 4baa75d..152364e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
@@ -25,9 +25,8 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
-
 import org.apache.hadoop.yarn.server.webapp.AppsBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
 
@@ -41,7 +40,7 @@ public class AHSView extends TwoColumnLayout {
   protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "apps");
-    set(initID(DATATABLES, "apps"), appsTableInit());
+    set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit());
     setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
 
     // Set the correct title.
@@ -64,27 +63,4 @@ public class AHSView extends TwoColumnLayout {
   protected Class<? extends SubView> content() {
     return AppsBlock.class;
   }
-
-  private String appsTableInit() {
-    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
-    return tableInit().append(", 'aaData': appsTableData")
-      .append(", bDeferRender: true").append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ").append(getAppsTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-
-  protected String getAppsTableColumnDefs() {
-    StringBuilder sb = new StringBuilder();
-    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
-      .append(", 'mRender': parseHadoopID }")
-
-      .append("\n, {'sType':'numeric', 'aTargets': [5, 6]")
-      .append(", 'mRender': renderHadoopDate }")
-
-      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
-      .append(", 'mRender': parseHadoopProgress }]").toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
index 814752b..4b579c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
 
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.webapp.TimelineWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
@@ -30,17 +30,17 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
 public class AHSWebApp extends WebApp implements YarnWebParams {
 
-  private ApplicationHistoryManager applicationHistoryManager;
+  private final ApplicationHistoryClientService historyClientService;
   private TimelineDataManager timelineDataManager;
 
   public AHSWebApp(TimelineDataManager timelineDataManager,
-      ApplicationHistoryManager applicationHistoryManager) {
+      ApplicationHistoryClientService historyClientService) {
     this.timelineDataManager = timelineDataManager;
-    this.applicationHistoryManager = applicationHistoryManager;
+    this.historyClientService = historyClientService;
   }
 
-  public ApplicationHistoryManager getApplicationHistoryManager() {
-    return applicationHistoryManager;
+  public ApplicationHistoryClientService getApplicationHistoryClientService() {
+    return historyClientService;
   }
 
   public TimelineDataManager getTimelineDataManager() {
@@ -53,7 +53,7 @@ public class AHSWebApp extends WebApp implements YarnWebParams {
     bind(AHSWebServices.class);
     bind(TimelineWebServices.class);
     bind(GenericExceptionHandler.class);
-    bind(ApplicationContext.class).toInstance(applicationHistoryManager);
+    bind(ApplicationBaseProtocol.class).toInstance(historyClientService);
     bind(TimelineDataManager.class).toInstance(timelineDataManager);
     route("/", AHSController.class);
     route(pajoin("/apps", APP_STATE), AHSController.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 2af4027..9edc9ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -33,7 +33,7 @@ import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
@@ -51,8 +51,8 @@ import com.google.inject.Singleton;
 public class AHSWebServices extends WebServices {
 
   @Inject
-  public AHSWebServices(ApplicationContext appContext) {
-    super(appContext);
+  public AHSWebServices(ApplicationBaseProtocol appBaseProt) {
+    super(appBaseProt);
   }
 
   @GET

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
index 63b44bd..1e0a342 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -21,9 +21,8 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
-
 import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
@@ -41,7 +40,7 @@ public class AppAttemptPage extends AHSView {
             $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
 
     set(DATATABLES_ID, "containers");
-    set(initID(DATATABLES, "containers"), containersTableInit());
+    set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
     setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
   }
 
@@ -50,16 +49,6 @@ public class AppAttemptPage extends AHSView {
     return AppAttemptBlock.class;
   }
 
-  private String containersTableInit() {
-    return tableInit().append(", 'aaData': containersTableData")
-      .append(", bDeferRender: true").append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ").append(getContainersTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-
   protected String getContainersTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
     return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
index 96ca659..cf92c1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
@@ -22,9 +22,8 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
-
 import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
@@ -40,9 +39,13 @@ public class AppPage extends AHSView {
       appId.isEmpty() ? "Bad request: missing application ID" : join(
         "Application ", $(YarnWebParams.APPLICATION_ID)));
 
-    set(DATATABLES_ID, "attempts");
-    set(initID(DATATABLES, "attempts"), attemptsTableInit());
+    set(DATATABLES_ID, "attempts ResourceRequests");
+    set(initID(DATATABLES, "attempts"), WebPageUtils.attemptsTableInit());
     setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
+
+    setTableStyles(html, "ResourceRequests");
+
+    set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.APP_HISTORY_WEB_UI);
   }
 
   @Override
@@ -50,16 +53,6 @@ public class AppPage extends AHSView {
     return AppBlock.class;
   }
 
-  private String attemptsTableInit() {
-    return tableInit().append(", 'aaData': attemptsTableData")
-      .append(", bDeferRender: true").append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ").append(getAttemptsTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-
   protected String getAttemptsTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
     return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
index 32d011e..d03b26d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
@@ -77,7 +77,7 @@ public class TestApplicationHistoryClientService {
     GetApplicationReportRequest request =
         GetApplicationReportRequest.newInstance(appId);
     GetApplicationReportResponse response =
-        clientService.getClientHandler().getApplicationReport(request);
+        clientService.getApplicationReport(request);
     ApplicationReport appReport = response.getApplicationReport();
     Assert.assertNotNull(appReport);
     Assert.assertEquals(123, appReport.getApplicationResourceUsageReport()
@@ -98,7 +98,7 @@ public class TestApplicationHistoryClientService {
     ApplicationId appId1 = ApplicationId.newInstance(0, 2);
     GetApplicationsRequest request = GetApplicationsRequest.newInstance();
     GetApplicationsResponse response =
-        clientService.getClientHandler().getApplications(request);
+        clientService.getApplications(request);
     List<ApplicationReport> appReport = response.getApplicationList();
     Assert.assertNotNull(appReport);
     Assert.assertEquals(appId, appReport.get(0).getApplicationId());
@@ -113,7 +113,7 @@ public class TestApplicationHistoryClientService {
     GetApplicationAttemptReportRequest request =
         GetApplicationAttemptReportRequest.newInstance(appAttemptId);
     GetApplicationAttemptReportResponse response =
-        clientService.getClientHandler().getApplicationAttemptReport(request);
+        clientService.getApplicationAttemptReport(request);
     ApplicationAttemptReport attemptReport =
         response.getApplicationAttemptReport();
     Assert.assertNotNull(attemptReport);
@@ -131,7 +131,7 @@ public class TestApplicationHistoryClientService {
     GetApplicationAttemptsRequest request =
         GetApplicationAttemptsRequest.newInstance(appId);
     GetApplicationAttemptsResponse response =
-        clientService.getClientHandler().getApplicationAttempts(request);
+        clientService.getApplicationAttempts(request);
     List<ApplicationAttemptReport> attemptReports =
         response.getApplicationAttemptList();
     Assert.assertNotNull(attemptReports);
@@ -150,7 +150,7 @@ public class TestApplicationHistoryClientService {
     GetContainerReportRequest request =
         GetContainerReportRequest.newInstance(containerId);
     GetContainerReportResponse response =
-        clientService.getClientHandler().getContainerReport(request);
+        clientService.getContainerReport(request);
     ContainerReport container = response.getContainerReport();
     Assert.assertNotNull(container);
     Assert.assertEquals(containerId, container.getContainerId());
@@ -169,7 +169,7 @@ public class TestApplicationHistoryClientService {
     GetContainersRequest request =
         GetContainersRequest.newInstance(appAttemptId);
     GetContainersResponse response =
-        clientService.getClientHandler().getContainers(request);
+        clientService.getContainers(request);
     List<ContainerReport> containers = response.getContainerList();
     Assert.assertNotNull(containers);
     Assert.assertEquals(containerId, containers.get(0).getContainerId());


[43/49] hadoop git commit: HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of jclass to GetStaticObjectField. Contributed by Hui Zheng.

Posted by zj...@apache.org.
HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of jclass to GetStaticObjectField. Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff83ae72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff83ae72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff83ae72

Branch: refs/heads/YARN-2928
Commit: ff83ae72318fe6c0f266a7bf08138bd2fdb51cbd
Parents: 85f6d67
Author: cnauroth <cn...@apache.org>
Authored: Wed Mar 11 23:27:49 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Wed Mar 11 23:27:49 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../io/compress/bzip2/Bzip2Compressor.java      |   3 -
 .../io/compress/bzip2/Bzip2Decompressor.java    |   3 -
 .../hadoop/io/compress/lz4/Lz4Compressor.java   |   4 -
 .../hadoop/io/compress/lz4/Lz4Decompressor.java |   4 -
 .../io/compress/snappy/SnappyCompressor.java    |   4 -
 .../io/compress/snappy/SnappyDecompressor.java  |   4 -
 .../hadoop/io/compress/zlib/ZlibCompressor.java |   3 -
 .../io/compress/zlib/ZlibDecompressor.java      |   5 +-
 .../hadoop/io/compress/bzip2/Bzip2Compressor.c  |   9 +-
 .../io/compress/bzip2/Bzip2Decompressor.c       |   7 --
 .../hadoop/io/compress/lz4/Lz4Compressor.c      |  13 ---
 .../hadoop/io/compress/lz4/Lz4Decompressor.c    |   8 --
 .../io/compress/snappy/SnappyCompressor.c       |   8 --
 .../io/compress/snappy/SnappyDecompressor.c     |   8 --
 .../hadoop/io/compress/zlib/ZlibCompressor.c    |  11 --
 .../hadoop/io/compress/zlib/ZlibDecompressor.c  |  10 --
 .../bzip2/TestBzip2CompressorDecompressor.java  | 104 +++++++++++++++++++
 .../lz4/TestLz4CompressorDecompressor.java      |  17 +++
 .../TestSnappyCompressorDecompressor.java       |  17 +++
 .../zlib/TestZlibCompressorDecompressor.java    |  17 +++
 21 files changed, 160 insertions(+), 102 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fb699bc..40b5cd0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -450,6 +450,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+    HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of
+    jclass to GetStaticObjectField. (Hui Zheng via cnauroth)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
index 0f333bb..a973dc9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
@@ -44,9 +44,6 @@ public class Bzip2Compressor implements Compressor {
 
   private static final Log LOG = LogFactory.getLog(Bzip2Compressor.class);
 
-  // HACK - Use this as a global lock in the JNI layer.
-  private static Class<Bzip2Compressor> clazz = Bzip2Compressor.class;
-
   private long stream;
   private int blockSize;
   private int workFactor;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
index 6720902..3135165 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
@@ -38,9 +38,6 @@ public class Bzip2Decompressor implements Decompressor {
   
   private static final Log LOG = LogFactory.getLog(Bzip2Decompressor.class);
 
-  // HACK - Use this as a global lock in the JNI layer.
-  private static Class<Bzip2Decompressor> clazz = Bzip2Decompressor.class;
-  
   private long stream;
   private boolean conserveMemory;
   private int directBufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
index b5db99f..ccfae8b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
@@ -37,10 +37,6 @@ public class Lz4Compressor implements Compressor {
       LogFactory.getLog(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
-  // HACK - Use this as a global lock in the JNI layer
-  @SuppressWarnings({"unchecked", "unused"})
-  private static Class clazz = Lz4Compressor.class;
-
   private int directBufferSize;
   private Buffer compressedDirectBuf = null;
   private int uncompressedDirectBufLen;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
index 22a3118..685956c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
@@ -36,10 +36,6 @@ public class Lz4Decompressor implements Decompressor {
       LogFactory.getLog(Lz4Compressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
-  // HACK - Use this as a global lock in the JNI layer
-  @SuppressWarnings({"unchecked", "unused"})
-  private static Class clazz = Lz4Decompressor.class;
-
   private int directBufferSize;
   private Buffer compressedDirectBuf = null;
   private int compressedDirectBufLen;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
index ab45f25..814718d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
@@ -37,10 +37,6 @@ public class SnappyCompressor implements Compressor {
       LogFactory.getLog(SnappyCompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
-  // HACK - Use this as a global lock in the JNI layer
-  @SuppressWarnings({"unchecked", "unused"})
-  private static Class clazz = SnappyCompressor.class;
-
   private int directBufferSize;
   private Buffer compressedDirectBuf = null;
   private int uncompressedDirectBufLen;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
index b5f5acf..dbffba8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
@@ -37,10 +37,6 @@ public class SnappyDecompressor implements Decompressor {
       LogFactory.getLog(SnappyCompressor.class.getName());
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
 
-  // HACK - Use this as a global lock in the JNI layer
-  @SuppressWarnings({"unchecked", "unused"})
-  private static Class clazz = SnappyDecompressor.class;
-
   private int directBufferSize;
   private Buffer compressedDirectBuf = null;
   private int compressedDirectBufLen;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
index 6799403..b955044 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
@@ -41,9 +41,6 @@ public class ZlibCompressor implements Compressor {
 
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
 
-  // HACK - Use this as a global lock in the JNI layer
-  private static Class clazz = ZlibCompressor.class;
-
   private long stream;
   private CompressionLevel level;
   private CompressionStrategy strategy;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
index 89c879a..d728fad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
@@ -34,10 +34,7 @@ import org.apache.hadoop.util.NativeCodeLoader;
  */
 public class ZlibDecompressor implements Decompressor {
   private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
-  
-  // HACK - Use this as a global lock in the JNI layer
-  private static Class clazz = ZlibDecompressor.class;
-  
+
   private long stream;
   private CompressionHeader header;
   private int directBufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c
index ef81bea..a92123e 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.c
@@ -25,7 +25,6 @@
 #include "org_apache_hadoop_io_compress_bzip2.h"
 #include "org_apache_hadoop_io_compress_bzip2_Bzip2Compressor.h"
 
-static jfieldID Bzip2Compressor_clazz;
 static jfieldID Bzip2Compressor_stream;
 static jfieldID Bzip2Compressor_uncompressedDirectBuf;
 static jfieldID Bzip2Compressor_uncompressedDirectBufOff;
@@ -74,8 +73,6 @@ Java_org_apache_hadoop_io_compress_bzip2_Bzip2Compressor_initIDs(
                         "BZ2_bzCompressEnd");
 
     // Initialize the requisite fieldIds.
-    Bzip2Compressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", 
-                                                     "Ljava/lang/Class;");
     Bzip2Compressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
     Bzip2Compressor_finish = (*env)->GetFieldID(env, class, "finish", "Z");
     Bzip2Compressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
@@ -155,9 +152,7 @@ Java_org_apache_hadoop_io_compress_bzip2_Bzip2Compressor_deflateBytesDirect(
         return (jint)0;
     } 
 
-    jobject clazz = (*env)->GetStaticObjectField(env, this, 
-                                                 Bzip2Compressor_clazz);
-    jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this, 
+    jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this,
                                      Bzip2Compressor_uncompressedDirectBuf);
     jint uncompressed_direct_buf_off = (*env)->GetIntField(env, this, 
                                    Bzip2Compressor_uncompressedDirectBufOff);
@@ -173,12 +168,10 @@ Java_org_apache_hadoop_io_compress_bzip2_Bzip2Compressor_deflateBytesDirect(
                                               Bzip2Compressor_finish);
 
     // Get the input and output direct buffers.
-    LOCK_CLASS(env, clazz, "Bzip2Compressor");
     char* uncompressed_bytes = (*env)->GetDirectBufferAddress(env, 
                                                 uncompressed_direct_buf);
     char* compressed_bytes = (*env)->GetDirectBufferAddress(env, 
                                                 compressed_direct_buf);
-    UNLOCK_CLASS(env, clazz, "Bzip2Compressor");
 
     if (!uncompressed_bytes || !compressed_bytes) {
         return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.c
index ad9bcb7..3d9fc08 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.c
@@ -25,7 +25,6 @@
 #include "org_apache_hadoop_io_compress_bzip2.h"
 #include "org_apache_hadoop_io_compress_bzip2_Bzip2Decompressor.h"
 
-static jfieldID Bzip2Decompressor_clazz;
 static jfieldID Bzip2Decompressor_stream;
 static jfieldID Bzip2Decompressor_compressedDirectBuf;
 static jfieldID Bzip2Decompressor_compressedDirectBufOff;
@@ -73,8 +72,6 @@ Java_org_apache_hadoop_io_compress_bzip2_Bzip2Decompressor_initIDs(
                         "BZ2_bzDecompressEnd");
 
     // Initialize the requisite fieldIds.
-    Bzip2Decompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz", 
-                                                       "Ljava/lang/Class;");
     Bzip2Decompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
     Bzip2Decompressor_finished = (*env)->GetFieldID(env, class,
                                                     "finished", "Z");
@@ -144,8 +141,6 @@ Java_org_apache_hadoop_io_compress_bzip2_Bzip2Decompressor_inflateBytesDirect(
         return (jint)0;
     } 
 
-    jobject clazz = (*env)->GetStaticObjectField(env, this, 
-                                                 Bzip2Decompressor_clazz);
     jarray compressed_direct_buf = (jarray)(*env)->GetObjectField(env,
                                 this, Bzip2Decompressor_compressedDirectBuf);
     jint compressed_direct_buf_off = (*env)->GetIntField(env, this, 
@@ -159,12 +154,10 @@ Java_org_apache_hadoop_io_compress_bzip2_Bzip2Decompressor_inflateBytesDirect(
                                 Bzip2Decompressor_directBufferSize);
 
     // Get the input and output direct buffers.
-    LOCK_CLASS(env, clazz, "Bzip2Decompressor");
     char* compressed_bytes = (*env)->GetDirectBufferAddress(env, 
                                                 compressed_direct_buf);
     char* uncompressed_bytes = (*env)->GetDirectBufferAddress(env, 
                                                 uncompressed_direct_buf);
-    UNLOCK_CLASS(env, clazz, "Bzip2Decompressor");
 
     if (!compressed_bytes || !uncompressed_bytes) {
         return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
index 9f14312..f742384 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
@@ -27,7 +27,6 @@
 #include "lz4hc.h"
 
 
-static jfieldID Lz4Compressor_clazz;
 static jfieldID Lz4Compressor_uncompressedDirectBuf;
 static jfieldID Lz4Compressor_uncompressedDirectBufLen;
 static jfieldID Lz4Compressor_compressedDirectBuf;
@@ -37,8 +36,6 @@ static jfieldID Lz4Compressor_directBufferSize;
 JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_initIDs
 (JNIEnv *env, jclass clazz){
 
-  Lz4Compressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
-                                                 "Ljava/lang/Class;");
   Lz4Compressor_uncompressedDirectBuf = (*env)->GetFieldID(env, clazz,
                                                            "uncompressedDirectBuf",
                                                            "Ljava/nio/Buffer;");
@@ -57,25 +54,20 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_comp
   char *compressed_bytes;
 
   // Get members of Lz4Compressor
-  jobject clazz = (*env)->GetStaticObjectField(env, thisj, Lz4Compressor_clazz);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf);
   jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen);
   jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_compressedDirectBuf);
   jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_directBufferSize);
 
   // Get the input direct buffer
-  LOCK_CLASS(env, clazz, "Lz4Compressor");
   uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "Lz4Compressor");
 
   if (uncompressed_bytes == 0) {
     return (jint)0;
   }
 
   // Get the output direct buffer
-  LOCK_CLASS(env, clazz, "Lz4Compressor");
   compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "Lz4Compressor");
 
   if (compressed_bytes == 0) {
     return (jint)0;
@@ -104,25 +96,20 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_comp
   char* compressed_bytes = NULL;
 
   // Get members of Lz4Compressor
-  jobject clazz = (*env)->GetStaticObjectField(env, thisj, Lz4Compressor_clazz);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf);
   jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_uncompressedDirectBufLen);
   jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_compressedDirectBuf);
   jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Compressor_directBufferSize);
 
   // Get the input direct buffer
-  LOCK_CLASS(env, clazz, "Lz4Compressor");
   uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "Lz4Compressor");
 
   if (uncompressed_bytes == 0) {
     return (jint)0;
   }
 
   // Get the output direct buffer
-  LOCK_CLASS(env, clazz, "Lz4Compressor");
   compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "Lz4Compressor");
 
   if (compressed_bytes == 0) {
     return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
index 2b8c91c..cdeaa31 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
@@ -25,7 +25,6 @@
 #include "lz4.h"
 
 
-static jfieldID Lz4Decompressor_clazz;
 static jfieldID Lz4Decompressor_compressedDirectBuf;
 static jfieldID Lz4Decompressor_compressedDirectBufLen;
 static jfieldID Lz4Decompressor_uncompressedDirectBuf;
@@ -34,8 +33,6 @@ static jfieldID Lz4Decompressor_directBufferSize;
 JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_initIDs
 (JNIEnv *env, jclass clazz){
 
-  Lz4Decompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
-                                                   "Ljava/lang/Class;");
   Lz4Decompressor_compressedDirectBuf = (*env)->GetFieldID(env,clazz,
                                                            "compressedDirectBuf",
                                                            "Ljava/nio/Buffer;");
@@ -54,25 +51,20 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_de
   char *uncompressed_bytes;
 
   // Get members of Lz4Decompressor
-  jobject clazz = (*env)->GetStaticObjectField(env,thisj, Lz4Decompressor_clazz);
   jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_compressedDirectBuf);
   jint compressed_direct_buf_len = (*env)->GetIntField(env,thisj, Lz4Decompressor_compressedDirectBufLen);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_uncompressedDirectBuf);
   size_t uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Decompressor_directBufferSize);
 
   // Get the input direct buffer
-  LOCK_CLASS(env, clazz, "Lz4Decompressor");
   compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "Lz4Decompressor");
 
   if (compressed_bytes == 0) {
     return (jint)0;
   }
 
   // Get the output direct buffer
-  LOCK_CLASS(env, clazz, "Lz4Decompressor");
   uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "Lz4Decompressor");
 
   if (uncompressed_bytes == 0) {
     return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
index fe827f0..9a09f07 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
@@ -38,7 +38,6 @@
 
 #define JINT_MAX 0x7fffffff
 
-static jfieldID SnappyCompressor_clazz;
 static jfieldID SnappyCompressor_uncompressedDirectBuf;
 static jfieldID SnappyCompressor_uncompressedDirectBufLen;
 static jfieldID SnappyCompressor_compressedDirectBuf;
@@ -84,8 +83,6 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_compress, dlsym_snappy_compress, env, libsnappy, "snappy_compress");
 #endif
 
-  SnappyCompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
-                                                 "Ljava/lang/Class;");
   SnappyCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, clazz,
                                                            "uncompressedDirectBuf",
                                                            "Ljava/nio/Buffer;");
@@ -104,7 +101,6 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   char* compressed_bytes;
   snappy_status ret;
   // Get members of SnappyCompressor
-  jobject clazz = (*env)->GetStaticObjectField(env, thisj, SnappyCompressor_clazz);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_uncompressedDirectBuf);
   jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen);
   jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf);
@@ -112,18 +108,14 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
   size_t buf_len;
 
   // Get the input direct buffer
-  LOCK_CLASS(env, clazz, "SnappyCompressor");
   uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (uncompressed_bytes == 0) {
     return (jint)0;
   }
 
   // Get the output direct buffer
-  LOCK_CLASS(env, clazz, "SnappyCompressor");
   compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (compressed_bytes == 0) {
     return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
index d1fd13c..69ec101 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
@@ -31,7 +31,6 @@
 
 #include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
 
-static jfieldID SnappyDecompressor_clazz;
 static jfieldID SnappyDecompressor_compressedDirectBuf;
 static jfieldID SnappyDecompressor_compressedDirectBufLen;
 static jfieldID SnappyDecompressor_uncompressedDirectBuf;
@@ -79,8 +78,6 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
   LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_uncompress, dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress");
 #endif
 
-  SnappyDecompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
-                                                   "Ljava/lang/Class;");
   SnappyDecompressor_compressedDirectBuf = (*env)->GetFieldID(env,clazz,
                                                            "compressedDirectBuf",
                                                            "Ljava/nio/Buffer;");
@@ -99,25 +96,20 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
   char* uncompressed_bytes = NULL;
   snappy_status ret;
   // Get members of SnappyDecompressor
-  jobject clazz = (*env)->GetStaticObjectField(env,thisj, SnappyDecompressor_clazz);
   jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_compressedDirectBuf);
   jint compressed_direct_buf_len = (*env)->GetIntField(env,thisj, SnappyDecompressor_compressedDirectBufLen);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_uncompressedDirectBuf);
   size_t uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyDecompressor_directBufferSize);
 
   // Get the input direct buffer
-  LOCK_CLASS(env, clazz, "SnappyDecompressor");
   compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
 
   if (compressed_bytes == 0) {
     return (jint)0;
   }
 
   // Get the output direct buffer
-  LOCK_CLASS(env, clazz, "SnappyDecompressor");
   uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
-  UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
 
   if (uncompressed_bytes == 0) {
     return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
index f7c0cb9..51f7bed 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
@@ -28,7 +28,6 @@
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h"
 
-static jfieldID ZlibCompressor_clazz;
 static jfieldID ZlibCompressor_stream;
 static jfieldID ZlibCompressor_uncompressedDirectBuf;
 static jfieldID ZlibCompressor_uncompressedDirectBufOff;
@@ -141,8 +140,6 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_initIDs(
 #endif
 
 	// Initialize the requisite fieldIds
-    ZlibCompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
-                                                      "Ljava/lang/Class;");
     ZlibCompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
     ZlibCompressor_finish = (*env)->GetFieldID(env, class, "finish", "Z");
     ZlibCompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
@@ -239,7 +236,6 @@ JNIEXPORT jint JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_deflateBytesDirect(
 	JNIEnv *env, jobject this
 	) {
-    jobject clazz = NULL;
     jobject uncompressed_direct_buf = NULL;
     jint uncompressed_direct_buf_off = 0;
     jint uncompressed_direct_buf_len = 0;
@@ -260,9 +256,6 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_deflateBytesDirect(
 		return (jint)0;
     }
 
-    // Get members of ZlibCompressor
-    clazz = (*env)->GetStaticObjectField(env, this,
-                                                 ZlibCompressor_clazz);
 	uncompressed_direct_buf = (*env)->GetObjectField(env, this,
 									ZlibCompressor_uncompressedDirectBuf);
 	uncompressed_direct_buf_off = (*env)->GetIntField(env, this,
@@ -278,20 +271,16 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_deflateBytesDirect(
 	finish = (*env)->GetBooleanField(env, this, ZlibCompressor_finish);
 
     // Get the input direct buffer
-    LOCK_CLASS(env, clazz, "ZlibCompressor");
     uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
 											uncompressed_direct_buf);
-    UNLOCK_CLASS(env, clazz, "ZlibCompressor");
 
   	if (uncompressed_bytes == 0) {
     	return (jint)0;
 	}
 
     // Get the output direct buffer
-    LOCK_CLASS(env, clazz, "ZlibCompressor");
     compressed_bytes = (*env)->GetDirectBufferAddress(env,
 										compressed_direct_buf);
-    UNLOCK_CLASS(env, clazz, "ZlibCompressor");
 
   	if (compressed_bytes == 0) {
 		return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
index 8b78f41..b9f23b1 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
@@ -28,7 +28,6 @@
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h"
 
-static jfieldID ZlibDecompressor_clazz;
 static jfieldID ZlibDecompressor_stream;
 static jfieldID ZlibDecompressor_compressedDirectBuf;
 static jfieldID ZlibDecompressor_compressedDirectBufOff;
@@ -104,8 +103,6 @@ JNIEnv *env, jclass class
 
 
   // Initialize the requisite fieldIds
-    ZlibDecompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
-                                                      "Ljava/lang/Class;");
     ZlibDecompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
     ZlibDecompressor_needDict = (*env)->GetFieldID(env, class, "needDict", "Z");
     ZlibDecompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
@@ -197,7 +194,6 @@ JNIEXPORT jint JNICALL
 Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
 	JNIEnv *env, jobject this
 	) {
-    jobject clazz = NULL;
     jarray compressed_direct_buf = NULL;
     jint compressed_direct_buf_off = 0;
     jint compressed_direct_buf_len = 0;
@@ -218,8 +214,6 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
     }
 
     // Get members of ZlibDecompressor
-    clazz = (*env)->GetStaticObjectField(env, this,
-                                                 ZlibDecompressor_clazz);
 	compressed_direct_buf = (jarray)(*env)->GetObjectField(env, this,
 											ZlibDecompressor_compressedDirectBuf);
 	compressed_direct_buf_off = (*env)->GetIntField(env, this,
@@ -233,20 +227,16 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
 										ZlibDecompressor_directBufferSize);
 
     // Get the input direct buffer
-    LOCK_CLASS(env, clazz, "ZlibDecompressor");
 	compressed_bytes = (*env)->GetDirectBufferAddress(env,
 										compressed_direct_buf);
-    UNLOCK_CLASS(env, clazz, "ZlibDecompressor");
 
 	if (!compressed_bytes) {
 	    return (jint)0;
 	}
 
     // Get the output direct buffer
-    LOCK_CLASS(env, clazz, "ZlibDecompressor");
 	uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
 											uncompressed_direct_buf);
-    UNLOCK_CLASS(env, clazz, "ZlibDecompressor");
 
 	if (!uncompressed_bytes) {
 	    return (jint)0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java
new file mode 100644
index 0000000..c585a46
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress.bzip2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.compress.*;
+import org.apache.hadoop.io.compress.bzip2.Bzip2Compressor;
+import org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor;
+import org.apache.hadoop.test.MultithreadedTestUtil;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.Random;
+
+import static org.junit.Assert.*;
+import static org.junit.Assume.*;
+import static org.junit.Assume.assumeTrue;
+
+public class TestBzip2CompressorDecompressor {
+  
+  private static final Random rnd = new Random(12345l);
+
+  @Before
+  public void before() {
+    assumeTrue(Bzip2Factory.isNativeBzip2Loaded(new Configuration()));
+  }
+
+  // test compress/decompress process 
+  @Test
+  public void testCompressDecompress() {
+    byte[] rawData = null;
+    int rawDataSize = 0;
+    rawDataSize = 1024 * 64;
+    rawData = generate(rawDataSize);
+    try {
+      Bzip2Compressor compressor = new Bzip2Compressor();
+      Bzip2Decompressor decompressor = new Bzip2Decompressor();
+      assertFalse("testBzip2CompressDecompress finished error",
+          compressor.finished());
+      compressor.setInput(rawData, 0, rawData.length);
+      assertTrue("testBzip2CompressDecompress getBytesRead before error",
+          compressor.getBytesRead() == 0);
+      compressor.finish();
+
+      byte[] compressedResult = new byte[rawDataSize];
+      int cSize = compressor.compress(compressedResult, 0, rawDataSize);
+      assertTrue("testBzip2CompressDecompress getBytesRead after error",
+          compressor.getBytesRead() == rawDataSize);
+      assertTrue(
+          "testBzip2CompressDecompress compressed size no less than original size",
+          cSize < rawDataSize);
+      decompressor.setInput(compressedResult, 0, cSize);
+      byte[] decompressedBytes = new byte[rawDataSize];
+      decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
+      assertArrayEquals("testBzip2CompressDecompress arrays not equals ",
+          rawData, decompressedBytes);
+      compressor.reset();
+      decompressor.reset();
+    } catch (IOException ex) {
+      fail("testBzip2CompressDecompress ex !!!" + ex);
+    }
+  }
+
+  public static byte[] generate(int size) {
+    byte[] array = new byte[size];
+    for (int i = 0; i < size; i++)
+      array[i] = (byte)rnd.nextInt(16);
+    return array;
+  }
+
+  @Test
+  public void testBzip2CompressDecompressInMultiThreads() throws Exception {
+    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
+    for(int i=0;i<10;i++) {
+      ctx.addThread( new MultithreadedTestUtil.TestingThread(ctx) {
+        @Override
+        public void doWork() throws Exception {
+          testCompressDecompress();
+        }
+      });
+    }
+    ctx.startThreads();
+
+    ctx.waitFor(60000);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java
index e8555b2..6f3b076 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.Lz4Codec;
 import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
 import org.apache.hadoop.io.compress.lz4.Lz4Decompressor;
+import org.apache.hadoop.test.MultithreadedTestUtil;
 import org.junit.Before;
 import org.junit.Test;
 import static org.junit.Assume.*;
@@ -313,4 +314,20 @@ public class TestLz4CompressorDecompressor {
       array[i] = (byte)rnd.nextInt(16);
     return array;
   }
+
+  @Test
+  public void testLz4CompressDecompressInMultiThreads() throws Exception {
+    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
+    for(int i=0;i<10;i++) {
+      ctx.addThread( new MultithreadedTestUtil.TestingThread(ctx) {
+        @Override
+        public void doWork() throws Exception {
+          testCompressDecompress();
+        }
+      });
+    }
+    ctx.startThreads();
+
+    ctx.waitFor(60000);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
index 77fbcc0..cc986c7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.io.compress.CompressionInputStream;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.SnappyCodec;
 import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor;
+import org.apache.hadoop.test.MultithreadedTestUtil;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -391,4 +392,20 @@ public class TestSnappyCompressorDecompressor {
       return array;
     }
   }
+
+  @Test
+  public void testSnappyCompressDecompressInMultiThreads() throws Exception {
+    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
+    for(int i=0;i<10;i++) {
+      ctx.addThread( new MultithreadedTestUtil.TestingThread(ctx) {
+        @Override
+        public void doWork() throws Exception {
+          testSnappyCompressDecompress();
+        }
+      });
+    }
+    ctx.startThreads();
+
+    ctx.waitFor(60000);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff83ae72/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
index db5784c..e751125 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.io.compress.CompressDecompressTester.CompressionTestStr
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
+import org.apache.hadoop.test.MultithreadedTestUtil;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Before;
 import org.junit.Test;
@@ -419,4 +420,20 @@ public class TestZlibCompressorDecompressor {
       data[i] = (byte)random.nextInt(16);
     return data;
   }
+
+  @Test
+  public void testZlibCompressDecompressInMultiThreads() throws Exception {
+    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
+    for(int i=0;i<10;i++) {
+      ctx.addThread( new MultithreadedTestUtil.TestingThread(ctx) {
+        @Override
+        public void doWork() throws Exception {
+          testZlibCompressDecompress();
+        }
+      });
+    }
+    ctx.startThreads();
+
+    ctx.waitFor(60000);
+  }
 }
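
A note on the native-codec changes above: the patch drops the static clazz field and the LOCK_CLASS/UNLOCK_CLASS guards around GetDirectBufferAddress in the bzip2, lz4, snappy and zlib codecs. GetDirectBufferAddress only reads the address already stored in the direct buffer object, so the per-class lock appears to add contention without adding safety, and each codec instead gains a multithreaded regression test. The Java sketch below only restates the shared test pattern; the class name and the roundTrip parameter are illustrative stand-ins for the existing testCompressDecompress-style methods, not part of the patch.

    import org.apache.hadoop.test.MultithreadedTestUtil;

    public class ConcurrentCodecTestSketch {
      // Run the given compress/decompress round trip from ten threads at once,
      // failing if any thread throws within the 60 second window.
      public void runConcurrently(final Runnable roundTrip) throws Exception {
        MultithreadedTestUtil.TestContext ctx =
            new MultithreadedTestUtil.TestContext();
        for (int i = 0; i < 10; i++) {
          ctx.addThread(new MultithreadedTestUtil.TestingThread(ctx) {
            @Override
            public void doWork() throws Exception {
              roundTrip.run();   // one full compress/decompress cycle per thread
            }
          });
        }
        ctx.startThreads();
        ctx.waitFor(60000);      // waits for the threads, rethrowing any failure
      }
    }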


[40/49] hadoop git commit: HDFS-7491. Add incremental blockreport latency to DN metrics. Contributed by Ming Ma.

Posted by zj...@apache.org.
HDFS-7491. Add incremental blockreport latency to DN metrics. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb34f457
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb34f457
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb34f457

Branch: refs/heads/YARN-2928
Commit: fb34f45727e63ea55377fe90241328025307d818
Parents: 344d7cb
Author: cnauroth <cn...@apache.org>
Authored: Wed Mar 11 14:11:19 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Wed Mar 11 14:11:19 2015 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md                  | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                     | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java  | 2 ++
 .../hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java    | 5 +++++
 .../apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java | 2 ++
 5 files changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb34f457/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 0e0fc09..4a10a00 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -303,6 +303,8 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `HeartbeatsAvgTime` | Average heartbeat time in milliseconds |
 | `BlockReportsNumOps` | Total number of block report operations |
 | `BlockReportsAvgTime` | Average time of block report operations in milliseconds |
+| `IncrementalBlockReportsNumOps` | Total number of incremental block report operations |
+| `IncrementalBlockReportsAvgTime` | Average time of incremental block report operations in milliseconds |
 | `CacheReportsNumOps` | Total number of cache report operations |
 | `CacheReportsAvgTime` | Average time of cache report operations in milliseconds |
 | `PacketAckRoundTripTimeNanosNumOps` | Total number of ack round trip |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb34f457/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ba34a4..07213dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -740,6 +740,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-6806. HDFS Rolling upgrade document should mention the versions
     available. (J.Andreina via aajisaka)
 
+    HDFS-7491. Add incremental blockreport latency to DN metrics.
+    (Ming Ma via cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb34f457/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ff1ad78..3ba2f54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -291,12 +291,14 @@ class BPServiceActor implements Runnable {
 
     // Send incremental block reports to the Namenode outside the lock
     boolean success = false;
+    final long startTime = Time.monotonicNow();
     try {
       bpNamenode.blockReceivedAndDeleted(bpRegistration,
           bpos.getBlockPoolId(),
           reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]));
       success = true;
     } finally {
+      dn.getMetrics().addIncrementalBlockReport(Time.monotonicNow()-startTime);
       if (!success) {
         synchronized (pendingIncrementalBRperStorage) {
           for (StorageReceivedDeletedBlocks report : reports) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb34f457/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 0fbc2ee..2e8eb22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -107,6 +107,7 @@ public class DataNodeMetrics {
   @Metric MutableRate replaceBlockOp;
   @Metric MutableRate heartbeats;
   @Metric MutableRate blockReports;
+  @Metric MutableRate incrementalBlockReports;
   @Metric MutableRate cacheReports;
   @Metric MutableRate packetAckRoundTripTimeNanos;
   final MutableQuantiles[] packetAckRoundTripTimeNanosQuantiles;
@@ -201,6 +202,10 @@ public class DataNodeMetrics {
     blockReports.add(latency);
   }
 
+  public void addIncrementalBlockReport(long latency) {
+    incrementalBlockReports.add(latency);
+  }
+
   public void addCacheReport(long latency) {
     cacheReports.add(latency);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb34f457/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 8a2bacf..5d27fe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -72,6 +72,8 @@ public class TestDataNodeMetrics {
       DataNode datanode = datanodes.get(0);
       MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
       assertCounter("BytesWritten", LONG_FILE_LEN, rb);
+      assertTrue("Expected non-zero number of incremental block reports",
+          getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
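
For context on how the two new Metrics.md rows line up with the code: a MutableRate field annotated with @Metric is published as <FieldName>NumOps and <FieldName>AvgTime once its source is registered with the metrics system (registration is also what instantiates the annotated field), which is where IncrementalBlockReportsNumOps and IncrementalBlockReportsAvgTime come from. The sketch below is a hedged restatement of that pattern, not the committed DataNodeMetrics code; the class name and the Runnable stand-in for the blockReceivedAndDeleted RPC are illustrative.

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.MutableRate;
    import org.apache.hadoop.util.Time;

    @Metrics(context = "dfs")
    class IncrementalBlockReportTimingSketch {
      // Surfaces as IncrementalBlockReportsNumOps / IncrementalBlockReportsAvgTime
      // once this source is registered with a MetricsSystem.
      @Metric MutableRate incrementalBlockReports;

      void timedReport(Runnable sendReport) {
        long start = Time.monotonicNow();
        try {
          sendReport.run();                       // stand-in for the actual IBR RPC
        } finally {
          // Record the latency even when the RPC fails, as BPServiceActor does.
          incrementalBlockReports.add(Time.monotonicNow() - start);
        }
      }
    }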


[35/49] hadoop git commit: DelegateToFileSystem erroneously uses default FS's port in constructor. (Brahma Reddy Battula via gera)

Posted by zj...@apache.org.
DelegateToFileSystem erroneously uses default FS's port in constructor. (Brahma Reddy Battula via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64eb068e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64eb068e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64eb068e

Branch: refs/heads/YARN-2928
Commit: 64eb068ee8863da41df8db44bde1a9033198983d
Parents: aa92b76
Author: Gera Shegalov <ge...@apache.org>
Authored: Tue Mar 10 13:52:06 2015 -0700
Committer: Gera Shegalov <ge...@apache.org>
Committed: Tue Mar 10 13:52:06 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/fs/DelegateToFileSystem.java  |  3 +-
 .../src/main/resources/core-default.xml         |  6 +++
 .../hadoop/fs/TestDelegateToFileSystem.java     | 52 ++++++++++++++++++++
 4 files changed, 62 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64eb068e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ab58270..3e42aa8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1082,6 +1082,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11692. Improve authentication failure WARN message to avoid user
     confusion. (Yongjun Zhang)
 
+    HADOOP-11618. DelegateToFileSystem erroneously uses default FS's port in
+    constructor. (Brahma Reddy Battula via gera)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64eb068e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 09707c6..6b7f387 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
@@ -47,7 +46,7 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
       Configuration conf, String supportedScheme, boolean authorityRequired)
       throws IOException, URISyntaxException {
     super(theUri, supportedScheme, authorityRequired, 
-        FileSystem.getDefaultUri(conf).getPort());
+        theFsImpl.getDefaultPort());
     fsImpl = theFsImpl;
     fsImpl.initialize(theUri, conf);
     fsImpl.statistics = getStatistics();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64eb068e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 1d531df..46eae0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -581,6 +581,12 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.AbstractFileSystem.ftp.impl</name>
+  <value>org.apache.hadoop.fs.ftp.FtpFs</value>
+  <description>The FileSystem for Ftp: uris.</description>
+</property>
+
+<property>
   <name>fs.ftp.host</name>
   <value>0.0.0.0</value>
   <description>FTP filesystem connects to this server</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64eb068e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
new file mode 100644
index 0000000..5de3286
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.net.URI;
+
+import org.apache.commons.net.ftp.FTP;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestDelegateToFileSystem {
+
+  private static final String FTP_DUMMYHOST = "ftp://dummyhost";
+  private static final URI FTP_URI_NO_PORT = URI.create(FTP_DUMMYHOST);
+  private static final URI FTP_URI_WITH_PORT = URI.create(FTP_DUMMYHOST + ":"
+      + FTP.DEFAULT_PORT);
+
+  private void testDefaultUriInternal(String defaultUri)
+      throws UnsupportedFileSystemException {
+    final Configuration conf = new Configuration();
+    FileSystem.setDefaultUri(conf, defaultUri);
+    final AbstractFileSystem ftpFs =
+        AbstractFileSystem.get(FTP_URI_NO_PORT, conf);
+    Assert.assertEquals(FTP_URI_WITH_PORT, ftpFs.getUri());
+  }
+
+  @Test
+  public void testDefaultURIwithOutPort() throws Exception {
+    testDefaultUriInternal("hdfs://dummyhost");
+  }
+
+  @Test
+  public void testDefaultURIwithPort() throws Exception {
+    testDefaultUriInternal("hdfs://dummyhost:8020");
+  }
+}
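
The behavioural change being pinned down here: DelegateToFileSystem previously took its default port from fs.defaultFS, so an ftp:// URI with no explicit port could inherit, say, 8020; after the fix it falls back to the wrapped FileSystem's getDefaultPort(), which is 21 for FTP. Below is a hedged, self-contained restatement of what the new test asserts; the host names are placeholders, and it relies on the fs.AbstractFileSystem.ftp.impl mapping added to core-default.xml above.

    import java.net.URI;

    import org.apache.commons.net.ftp.FTP;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.AbstractFileSystem;
    import org.apache.hadoop.fs.FileSystem;

    public class FtpDefaultPortSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The default FS carries a port; it must not leak into the ftp URI.
        FileSystem.setDefaultUri(conf, "hdfs://dummyhost:8020");
        AbstractFileSystem ftpFs =
            AbstractFileSystem.get(URI.create("ftp://dummyhost"), conf);
        // With the fix this prints ftp://dummyhost:21, i.e. FTP.DEFAULT_PORT.
        System.out.println(ftpFs.getUri() + " (expected port " + FTP.DEFAULT_PORT + ")");
      }
    }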


[10/49] hadoop git commit: HDFS-6488. Support HDFS superuser in NFSv3 gateway. Contributed by Brandon Li

Posted by zj...@apache.org.
HDFS-6488. Support HDFS superuser in NFSv3 gateway. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f8ecb1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f8ecb1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f8ecb1d

Branch: refs/heads/YARN-2928
Commit: 0f8ecb1d0ce6d3ee9a7caf5b15b299210c2b8875
Parents: 27e8ea8
Author: Brandon Li <br...@apache.org>
Authored: Fri Mar 6 15:19:45 2015 -0800
Committer: Brandon Li <br...@apache.org>
Committed: Fri Mar 6 15:19:45 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java     | 14 +++++++
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    | 12 +++++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../src/site/markdown/HdfsNfsGateway.md         | 44 +++++++++++++++++---
 4 files changed, 64 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f8ecb1d/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 9e4aaf5..09ee579 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -73,4 +73,18 @@ public class NfsConfigKeys {
   
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
+  
+  /*
+   * HDFS super-user is the user with the same identity as NameNode process
+   * itself and the super-user can do anything in that permissions checks never
+   * fail for the super-user. If the following property is configured, the
+   * superuser on NFS client can access any file on HDFS. By default, the super
+   * user is not configured in the gateway. Note that, even if the superuser is
+   * configured, "nfs.exports.allowed.hosts" still takes effect. For example,
+   * the superuser will not have write access to HDFS files through the gateway
+   * if the NFS client host is not allowed to have write access in
+   * "nfs.exports.allowed.hosts".
+   */
+  public static final String  NFS_SUPERUSER_KEY = "nfs.superuser";
+  public static final String  NFS_SUPERUSER_DEFAULT = "";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f8ecb1d/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 05d0674..268abba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Options;
@@ -166,6 +165,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   private JvmPauseMonitor pauseMonitor;
   private Nfs3HttpServer infoServer = null;
   static Nfs3Metrics metrics;
+  private String superuser;
 
   public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
@@ -200,6 +200,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     UserGroupInformation.setConfiguration(config);
     SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
         NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
+    superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY,
+        NfsConfigKeys.NFS_SUPERUSER_DEFAULT);
+    LOG.info("Configured HDFS superuser is " + superuser);
 
     if (!enableDump) {
       writeDumpDir = null;
@@ -583,13 +586,18 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     try {
-      // HDFS-5804 removed supserUserClient access
       attrs = writeManager.getFileAttr(dfsClient, handle, iug);
 
       if (attrs == null) {
         LOG.error("Can't get path for fileId: " + handle.getFileId());
         return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
       }
+      if(iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) {
+        int access = Nfs3Constant.ACCESS3_LOOKUP | Nfs3Constant.ACCESS3_DELETE
+            | Nfs3Constant.ACCESS3_EXECUTE | Nfs3Constant.ACCESS3_EXTEND
+            | Nfs3Constant.ACCESS3_MODIFY | Nfs3Constant.ACCESS3_READ;
+        return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
+      }
       int access = Nfs3Utils.getAccessRightsForUserGroup(
           securityHandler.getUid(), securityHandler.getGid(),
           securityHandler.getAuxGids(), attrs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f8ecb1d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b443902..29717e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -348,6 +348,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)
 
+    HDFS-6488. Support HDFS superuser in NFS gateway. (brandonli)
+
   IMPROVEMENTS
 
     HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f8ecb1d/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index cea491f..e6666d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -80,14 +80,33 @@ The above are the only required configuration for the NFS gateway in non-secure
 
 The rest of the NFS gateway configurations are optional for both secure and non-secure mode.
 
-The AIX NFS client has a [few known issues](https://issues.apache.org/jira/browse/HDFS-6549) that prevent it from working correctly by default with the HDFS NFS Gateway. If you want to be able to access the HDFS NFS Gateway from AIX, you should set the following configuration setting to enable work-arounds for these issues:
+*   The AIX NFS client has a [few known issues](https://issues.apache.org/jira/browse/HDFS-6549)
+    that prevent it from working correctly by default with the HDFS NFS Gateway. If you want to
+    be able to access the HDFS NFS Gateway from AIX, you should set the following configuration
+    setting to enable work-arounds for these issues:
 
-    <property>
-      <name>nfs.aix.compatibility.mode.enabled</name>
-      <value>true</value>
-    </property>
+        <property>
+          <name>nfs.aix.compatibility.mode.enabled</name>
+          <value>true</value>
+        </property>
+
+    Note that regular, non-AIX clients should NOT enable AIX compatibility mode. The work-arounds
+    implemented by AIX compatibility mode effectively disable safeguards to ensure that listing
+    of directory contents via NFS returns consistent results, and that all data sent to the NFS
+    server can be assured to have been committed.
+
+*   HDFS super-user is the user with the same identity as NameNode process itself and
+    the super-user can do anything in that permissions checks never fail for the super-user. 
+    If the following property is configured, the superuser on NFS client can access any file
+    on HDFS. By default, the super user is not configured in the gateway.
+    Note that, even if the superuser is configured, "nfs.exports.allowed.hosts" still takes effect.
+    For example, the superuser will not have write access to HDFS files through the gateway if
+    the NFS client host is not allowed to have write access in "nfs.exports.allowed.hosts".
 
-Note that regular, non-AIX clients should NOT enable AIX compatibility mode. The work-arounds implemented by AIX compatibility mode effectively disable safeguards to ensure that listing of directory contents via NFS returns consistent results, and that all data sent to the NFS server can be assured to have been committed.
+        <property>
+          <name>nfs.superuser</name>
+          <value>the_name_of_hdfs_superuser</value>
+        </property>
 
 It's strongly recommended for the users to update a few configuration properties based on their use cases. All the following configuration properties can be added or updated in hdfs-site.xml.
 
@@ -135,6 +154,19 @@ It's strongly recommended for the users to update a few configuration properties
           <value>* rw</value>
         </property>
 
+*   HDFS super-user is the user with the same identity as NameNode process itself and
+    the super-user can do anything in that permissions checks never fail for the super-user. 
+    If the following property is configured, the superuser on NFS client can access any file
+    on HDFS. By default, the super user is not configured in the gateway.
+    Note that, even if the superuser is configured, "nfs.exports.allowed.hosts" still takes effect.
+    For example, the superuser will not have write access to HDFS files through the gateway if
+    the NFS client host is not allowed to have write access in "nfs.exports.allowed.hosts".
+
+        <property>
+          <name>nfs.superuser</name>
+          <value>the_name_of_hdfs_superuser</value>
+        </property>
+
 *   JVM and log settings. You can export JVM settings (e.g., heap size and GC log) in
     HADOOP\_NFS3\_OPTS. More NFS related settings can be found in hadoop-env.sh.
     To get NFS debug trace, you can edit the log4j.property file
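
The two nfs.superuser passages above describe an ordering that is easy to miss: the host check from "nfs.exports.allowed.hosts" applies regardless of who the user is, and only after it passes does the configured superuser bypass per-file permission checks. The sketch below is purely illustrative; none of these method names exist in the gateway (the real checks live in RpcProgramNfs3 and the export table), and it flattens the read/write export modes into a single boolean.

    // Illustrative only; see the note above for the assumptions made here.
    abstract class NfsSuperuserAccessSketch {
      private final String configuredSuperuser;   // value of nfs.superuser, "" if unset

      NfsSuperuserAccessSketch(String configuredSuperuser) {
        this.configuredSuperuser = configuredSuperuser;
      }

      boolean canAccess(String clientHost, String user, String path) {
        if (!allowedByExports(clientHost)) {       // nfs.exports.allowed.hosts still applies
          return false;
        }
        if (!configuredSuperuser.isEmpty()
            && configuredSuperuser.equals(user)) { // superuser bypasses file permissions
          return true;
        }
        return hdfsPermissionsAllow(user, path);   // normal HDFS permission evaluation
      }

      abstract boolean allowedByExports(String clientHost);
      abstract boolean hdfsPermissionsAllow(String user, String path);
    }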


[39/49] hadoop git commit: HADOOP-11703. git should ignore .DS_Store files on Mac OS X (Abin Shahab via aw)

Posted by zj...@apache.org.
HADOOP-11703. git should ignore .DS_Store files on Mac OS X (Abin Shahab via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/344d7cb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/344d7cb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/344d7cb6

Branch: refs/heads/YARN-2928
Commit: 344d7cb6dcd3b1752c82b62271799be9e4e10416
Parents: 30c428a
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Mar 11 13:19:12 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Mar 11 13:19:12 2015 -0700

----------------------------------------------------------------------
 .gitignore                                      | 1 +
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/344d7cb6/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 15c040c..a49ad4b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+.DS_Store
 *.iml
 *.ipr
 *.iws

http://git-wip-us.apache.org/repos/asf/hadoop/blob/344d7cb6/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3e42aa8..5bca61f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -426,6 +426,9 @@ Trunk (Unreleased)
     HADOOP-11668. hadoop-daemons.sh bw compat broke with --slaves change
     (Vinayakumar B via aw)
 
+    HADOOP-11703. git should ignore .DS_Store files on Mac OS X (Abin Shahab
+    via aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)


[11/49] hadoop git commit: HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0. Contributed by Shashank Khandelwal and Ivan Mitic.

Posted by zj...@apache.org.
HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0. Contributed by Shashank Khandelwal and Ivan Mitic.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608ebd52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608ebd52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608ebd52

Branch: refs/heads/YARN-2928
Commit: 608ebd52bafecf980e9726d397c786a9c2422eba
Parents: 0f8ecb1
Author: cnauroth <cn...@apache.org>
Authored: Fri Mar 6 14:59:09 2015 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Fri Mar 6 15:25:01 2015 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 hadoop-project/pom.xml                          |  6 +-
 hadoop-tools/hadoop-azure/pom.xml               |  7 +-
 .../fs/azure/AzureNativeFileSystemStore.java    | 37 ++++++-----
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 10 +--
 .../hadoop/fs/azure/PageBlobFormatHelpers.java  |  2 +-
 .../hadoop/fs/azure/PageBlobInputStream.java    |  8 +--
 .../hadoop/fs/azure/PageBlobOutputStream.java   |  8 +--
 .../hadoop/fs/azure/SelfRenewingLease.java      |  6 +-
 .../fs/azure/SelfThrottlingIntercept.java       | 10 +--
 .../hadoop/fs/azure/SendRequestIntercept.java   | 16 +++--
 .../hadoop/fs/azure/StorageInterface.java       | 24 +++----
 .../hadoop/fs/azure/StorageInterfaceImpl.java   | 46 +++++++------
 .../fs/azure/metrics/ErrorMetricUpdater.java    |  8 +--
 .../metrics/ResponseReceivedMetricUpdater.java  | 10 +--
 .../fs/azure/AzureBlobStorageTestAccount.java   | 28 ++++----
 .../hadoop/fs/azure/MockStorageInterface.java   | 70 ++++++++++++++------
 .../fs/azure/NativeAzureFileSystemBaseTest.java |  6 +-
 .../TestAzureFileSystemErrorConditions.java     |  6 +-
 .../hadoop/fs/azure/TestBlobDataValidation.java | 20 +++---
 .../hadoop/fs/azure/TestContainerChecks.java    |  6 +-
 .../TestOutOfBandAzureBlobOperationsLive.java   |  4 +-
 .../fs/azure/TestWasbUriAndConfiguration.java   |  4 +-
 23 files changed, 190 insertions(+), 155 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 628faa3..14cd75a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
     (Liang Xie via ozawa)
 
+    HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0.
+    (Shashank Khandelwal and Ivan Mitic via cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2c0f03a..a6127c7 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -902,9 +902,9 @@
       </dependency>
 
       <dependency>
-        <groupId>com.microsoft.windowsazure.storage</groupId>
-        <artifactId>microsoft-windowsazure-storage-sdk</artifactId>
-        <version>0.6.0</version>
+        <groupId>com.microsoft.azure</groupId>
+        <artifactId>azure-storage</artifactId>
+        <version>2.0.0</version>
      </dependency>
 
      <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index d39dd76..e9b3af7 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -140,12 +140,13 @@
       <artifactId>httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
-
+    
     <dependency>
-      <groupId>com.microsoft.windowsazure.storage</groupId>
-      <artifactId>microsoft-windowsazure-storage-sdk</artifactId>
+      <groupId>com.microsoft.azure</groupId>
+      <artifactId>azure-storage</artifactId>
       <scope>compile</scope>
     </dependency>
+    
 
     <dependency>
       <groupId>com.google.guava</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index c0c03b3..b664fe7 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -65,23 +65,23 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryExponentialRetry;
-import com.microsoft.windowsazure.storage.RetryNoRetry;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageCredentialsAccountAndKey;
-import com.microsoft.windowsazure.storage.StorageCredentialsSharedAccessSignature;
-import com.microsoft.windowsazure.storage.StorageErrorCode;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CopyStatus;
-import com.microsoft.windowsazure.storage.blob.DeleteSnapshotsOption;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.core.Utility;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryExponentialRetry;
+import com.microsoft.azure.storage.RetryNoRetry;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
+import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
+import com.microsoft.azure.storage.StorageErrorCode;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CopyStatus;
+import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.core.Utility;
 
 /**
  * Core implementation of Windows Azure Filesystem for Hadoop.
@@ -2543,7 +2543,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     try {
       checkContainer(ContainerAccessType.ReadThenWrite);
       CloudBlobWrapper blob = getBlobReference(key);
-      blob.getProperties().setLastModified(lastModified);
+      // setLastModified is not available in SDK 2.0.0; blob.uploadProperties
+      // automatically updates the last modified timestamp to the current time.
       blob.uploadProperties(getInstrumentedContext(), folderLease);
     } catch (Exception e) {
 

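The AzureNativeFileSystemStore change above drops the explicit setLastModified call because the 2.0.0 SDK no longer exposes a public setter; re-uploading the blob's properties is enough, since the service refreshes the timestamp on that write. A minimal sketch of that pattern against the raw SDK types (not the patch's CloudBlobWrapper), assuming an already-authenticated CloudBlobContainer named container and an arbitrary blob key:

  import java.util.Date;

  import com.microsoft.azure.storage.blob.CloudBlobContainer;
  import com.microsoft.azure.storage.blob.CloudBlockBlob;

  class LastModifiedSketch {
    // Re-upload the blob's properties; the service stamps a new Last-Modified
    // value, so no explicit setter is needed (or available) in 2.0.0.
    static Date touch(CloudBlobContainer container, String key) throws Exception {
      CloudBlockBlob blob = container.getBlockBlobReference(key);
      blob.downloadAttributes();   // populate current properties and metadata
      blob.uploadProperties();     // server side refreshes Last-Modified
      blob.downloadAttributes();   // re-read to observe the new timestamp
      return blob.getProperties().getLastModified();
    }
  }
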
http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 0248b85..e39b37d 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -74,11 +74,11 @@ import org.codehaus.jackson.map.JsonMappingException;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.core.*;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.core.*;
 
 /**
  * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
index ad11aac..9a316a5 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.fs.azure;
 
 import java.nio.ByteBuffer;
 
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
 
 /**
  * Constants and helper methods for ASV's custom data format in page blobs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 62b47ee..468ac65 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -33,10 +33,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.PageRange;
 
 /**
  * An input stream that reads file data from a page blob stored

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
index 4d1d5c8..2b8846c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
@@ -44,10 +44,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudPageBlob;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudPageBlob;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index bda6006..06f32ce 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -22,9 +22,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
 
 import java.util.concurrent.atomic.AtomicInteger;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
index d18a144..a9e3df9 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
@@ -25,11 +25,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RequestResult;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.SendingRequestEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RequestResult;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 /*
  * Self throttling is implemented by hooking into send & response callbacks 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
index 18f173e..4d564d5 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
@@ -25,12 +25,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.Constants.HeaderConstants;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.SendingRequestEvent;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageEvent;
-import com.microsoft.windowsazure.storage.StorageException;
+import com.microsoft.azure.storage.Constants.HeaderConstants;
+import com.microsoft.azure.storage.core.StorageCredentialsHelper;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.StorageException;
 
 /**
  * Manages the lifetime of binding on the operation contexts to intercept send
@@ -146,7 +147,8 @@ public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent
       try {
         // Sign the request. GET's have no payload so the content length is
         // zero.
-        getCredentials().signBlobAndQueueRequest(urlConnection, -1L, getOperationContext());
+        StorageCredentialsHelper.signBlobAndQueueRequest(getCredentials(),
+          urlConnection, -1L, getOperationContext());
       } catch (InvalidKeyException e) {
         // Log invalid key exception to track signing error before the send
         // fails.

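The SendRequestIntercept hunk above reflects another 2.0.0 move: request signing is no longer an instance method on StorageCredentials but a static helper in com.microsoft.azure.storage.core. A minimal sketch of the new call, passing -1L for the content length as the patch does for payload-free GETs (the credentials, connection and context arguments are placeholders supplied by the caller):

  import java.net.HttpURLConnection;

  import com.microsoft.azure.storage.OperationContext;
  import com.microsoft.azure.storage.StorageCredentials;
  import com.microsoft.azure.storage.core.StorageCredentialsHelper;

  class SigningSketch {
    // Sign an outgoing blob/queue request; -1L indicates the request has no body.
    static void sign(StorageCredentials creds, HttpURLConnection conn,
        OperationContext opContext) throws Exception {
      StorageCredentialsHelper.signBlobAndQueueRequest(creds, conn, -1L, opContext);
    }
  }

As in the patch, a bad account key still surfaces from this call as an InvalidKeyException, which the intercept logs before the send fails.
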
http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
index 8d0229d..91928a2 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
@@ -29,18 +29,18 @@ import java.util.HashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryPolicyFactory;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CopyState;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryPolicyFactory;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CopyState;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.blob.PageRange;
 
 /**
  * This is a very thin layer over the methods exposed by the Windows Azure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
index e44823c..2120536 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
@@ -30,26 +30,26 @@ import java.util.Iterator;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryPolicyFactory;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.StorageUri;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CloudBlobClient;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlobDirectory;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
-import com.microsoft.windowsazure.storage.blob.CloudPageBlob;
-import com.microsoft.windowsazure.storage.blob.CopyState;
-import com.microsoft.windowsazure.storage.blob.DeleteSnapshotsOption;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryPolicyFactory;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.StorageUri;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.CloudPageBlob;
+import com.microsoft.azure.storage.blob.CopyState;
+import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.blob.PageRange;
 
 /**
  * A real implementation of the Azure interaction layer that just redirects
@@ -61,12 +61,14 @@ class StorageInterfaceImpl extends StorageInterface {
 
   @Override
   public void setRetryPolicyFactory(final RetryPolicyFactory retryPolicyFactory) {
-    serviceClient.setRetryPolicyFactory(retryPolicyFactory);
+    serviceClient.getDefaultRequestOptions().setRetryPolicyFactory(
+            retryPolicyFactory);
   }
 
   @Override
   public void setTimeoutInMs(int timeoutInMs) {
-    serviceClient.setTimeoutInMs(timeoutInMs);
+    serviceClient.getDefaultRequestOptions().setTimeoutIntervalInMs(
+            timeoutInMs);
   }
 
   @Override

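The StorageInterfaceImpl hunk above shows the third recurring 2.0.0 change: client-wide retry and timeout settings moved onto the client's default request options. A minimal sketch with illustrative values (not the ones hadoop-azure actually configures), assuming an already-constructed CloudBlobClient:

  import com.microsoft.azure.storage.RetryExponentialRetry;
  import com.microsoft.azure.storage.blob.BlobRequestOptions;
  import com.microsoft.azure.storage.blob.CloudBlobClient;

  class ClientOptionsSketch {
    static void configure(CloudBlobClient serviceClient) {
      BlobRequestOptions defaults = serviceClient.getDefaultRequestOptions();
      // Replaces serviceClient.setRetryPolicyFactory(...) from the 0.6.0 SDK.
      defaults.setRetryPolicyFactory(new RetryExponentialRetry(3 * 1000, 5));
      // Replaces serviceClient.setTimeoutInMs(...) from the 0.6.0 SDK.
      defaults.setTimeoutIntervalInMs(90 * 1000);
    }
  }

Per-operation BlobRequestOptions passed to individual calls can still override these client-level defaults.
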
http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
index d33e8c4..dc23354 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
@@ -24,10 +24,10 @@ import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; //500
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RequestResult;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RequestResult;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
index 676adb9..de503bf 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
@@ -24,11 +24,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.Constants.HeaderConstants;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RequestResult;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.Constants.HeaderConstants;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RequestResult;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index b8ff912..635c024 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -40,20 +40,20 @@ import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageCredentialsAccountAndKey;
-import com.microsoft.windowsazure.storage.StorageCredentialsAnonymous;
-import com.microsoft.windowsazure.storage.blob.BlobContainerPermissions;
-import com.microsoft.windowsazure.storage.blob.BlobContainerPublicAccessType;
-import com.microsoft.windowsazure.storage.blob.BlobOutputStream;
-import com.microsoft.windowsazure.storage.blob.CloudBlobClient;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
-import com.microsoft.windowsazure.storage.blob.SharedAccessBlobPermissions;
-import com.microsoft.windowsazure.storage.blob.SharedAccessBlobPolicy;
-import com.microsoft.windowsazure.storage.core.Base64;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
+import com.microsoft.azure.storage.StorageCredentialsAnonymous;
+import com.microsoft.azure.storage.blob.BlobContainerPermissions;
+import com.microsoft.azure.storage.blob.BlobContainerPublicAccessType;
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions;
+import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy;
+import com.microsoft.azure.storage.core.Base64;
 
 /**
  * Helper class to create WASB file systems backed by either a mock in-memory

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
index 047ea1b..c51c05b 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
@@ -22,10 +22,12 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.lang.reflect.Method;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Calendar;
+import java.util.Date;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -35,21 +37,21 @@ import org.apache.commons.httpclient.URIException;
 import org.apache.commons.httpclient.util.URIUtil;
 import org.apache.commons.lang.NotImplementedException;
 
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryPolicyFactory;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.StorageUri;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlobDirectory;
-import com.microsoft.windowsazure.storage.blob.CopyState;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryPolicyFactory;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.StorageUri;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.CopyState;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.blob.PageRange;
 
 import javax.ws.rs.core.UriBuilder;
 import javax.ws.rs.core.UriBuilderException;
@@ -357,18 +359,42 @@ public class MockStorageInterface extends StorageInterface {
       this.uri = uri;
       this.metadata = metadata;
       this.properties = new BlobProperties();
-      this.properties.setLength(length);
-      this.properties.setLastModified(
-          Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
+      
+      this.properties=updateLastModifed(this.properties);
+      this.properties=updateLength(this.properties,length);
+    }
+    
+    protected BlobProperties updateLastModifed(BlobProperties properties){
+      try{
+          Method setLastModified =properties.getClass().
+            getDeclaredMethod("setLastModified", Date.class);
+          setLastModified.setAccessible(true);
+          setLastModified.invoke(this.properties,
+            Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
+      }catch(Exception e){
+          throw new RuntimeException(e);
+      }
+      return properties;
     }
-
+    
+    protected BlobProperties updateLength(BlobProperties properties,int length) {
+      try{
+          Method setLength =properties.getClass().
+            getDeclaredMethod("setLength", long.class);
+          setLength.setAccessible(true);
+          setLength.invoke(this.properties, length);
+      }catch (Exception e){
+         throw new RuntimeException(e);
+      }
+      return properties;
+    }
+    
     protected void refreshProperties(boolean getMetadata) {
       if (backingStore.exists(convertUriToDecodedString(uri))) {
         byte[] content = backingStore.getContent(convertUriToDecodedString(uri));
         properties = new BlobProperties();
-        properties.setLength(content.length);
-        properties.setLastModified(
-            Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
+        this.properties=updateLastModifed(this.properties);
+        this.properties=updateLength(this.properties, content.length);
         if (getMetadata) {
           metadata = backingStore.getMetadata(convertUriToDecodedString(uri));
         }

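The MockStorageInterface hunk above copes with BlobProperties.setLength and setLastModified no longer being publicly callable in 2.0.0 by invoking them reflectively. A standalone sketch of the same technique, assuming those non-public setter names exist exactly as the patch expects:

  import java.lang.reflect.Method;
  import java.util.Calendar;
  import java.util.Date;
  import java.util.TimeZone;

  import com.microsoft.azure.storage.blob.BlobProperties;

  class MockPropertiesSketch {
    // Populate length and last-modified on a BlobProperties instance whose
    // setters are not part of the public 2.0.0 API.
    static BlobProperties populate(BlobProperties props, long length) {
      try {
        Method setLength =
            props.getClass().getDeclaredMethod("setLength", long.class);
        setLength.setAccessible(true);
        setLength.invoke(props, length);

        Method setLastModified =
            props.getClass().getDeclaredMethod("setLastModified", Date.class);
        setLastModified.setAccessible(true);
        setLastModified.invoke(props,
            Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
      return props;
    }
  }

Reflection like this is brittle across SDK upgrades, which is an acceptable trade-off for a test-only in-memory mock.
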
http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
index 01cf713..9ce6cc9 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
@@ -58,9 +58,9 @@ import org.junit.Test;
 import org.apache.hadoop.fs.azure.AzureException;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
 
 /*
  * Tests the Native Azure file system (WASB) against an actual blob store if

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
index febb605..ace57dc 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
@@ -37,9 +37,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.SendingRequestEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 public class TestAzureFileSystemErrorConditions {
   private static final int ALL_THREE_FILE_SIZE = 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
index 25bd338..9237ade 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
@@ -39,16 +39,16 @@ import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationCo
 import org.junit.After;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.Constants;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.StorageErrorCodeStrings;
-import com.microsoft.windowsazure.storage.StorageEvent;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlockEntry;
-import com.microsoft.windowsazure.storage.blob.BlockSearchMode;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
-import com.microsoft.windowsazure.storage.core.Base64;
+import com.microsoft.azure.storage.Constants;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageErrorCodeStrings;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlockEntry;
+import com.microsoft.azure.storage.blob.BlockSearchMode;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.core.Base64;
 
 /**
  * Test that we do proper data integrity validation with MD5 checks as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
index 727f540..56ec881 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
 import org.junit.After;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.blob.BlobOutputStream;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
 
 /**
  * Tests that WASB creates containers only if needed.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
index 9ac67e7..60b01c6 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
@@ -29,8 +29,8 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.blob.BlobOutputStream;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
 
 public class TestOutOfBandAzureBlobOperationsLive {
   private FileSystem fs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ebd52/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
index 0360e32..a4ca6fd 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
@@ -42,8 +42,8 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
 
 public class TestWasbUriAndConfiguration {
 


[02/49] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

Posted by zj...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
index 7bac6f2..2cd7580 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -20,15 +20,16 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 import static org.apache.hadoop.yarn.webapp.Params.TITLE;
 import static org.mockito.Mockito.mock;
-import org.junit.Assert;
 
+import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
@@ -68,8 +69,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testView() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(5, 1, 1));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(5, 1, 1));
     AHSView ahsViewInstance = injector.getInstance(AHSView.class);
 
     ahsViewInstance.render();
@@ -89,8 +90,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppPage() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(1, 5, 1));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(1, 5, 1));
     AppPage appPageInstance = injector.getInstance(AppPage.class);
 
     appPageInstance.render();
@@ -105,8 +106,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppAttemptPage() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(1, 1, 5));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(1, 1, 5));
     AppAttemptPage appAttemptPageInstance =
         injector.getInstance(AppAttemptPage.class);
 
@@ -123,8 +124,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testContainerPage() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(1, 1, 1));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(1, 1, 1));
     ContainerPage containerPageInstance =
         injector.getInstance(ContainerPage.class);
 
@@ -141,10 +142,12 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
     WebAppTests.flushOutput(injector);
   }
 
-  ApplicationHistoryManager mockApplicationHistoryManager(int numApps,
+  ApplicationHistoryClientService mockApplicationHistoryClientService(int numApps,
       int numAppAttempts, int numContainers) throws Exception {
     ApplicationHistoryManager ahManager =
         new MockApplicationHistoryManagerImpl(store);
+    ApplicationHistoryClientService historyClientService =
+        new ApplicationHistoryClientService(ahManager);
     for (int i = 1; i <= numApps; ++i) {
       ApplicationId appId = ApplicationId.newInstance(0, i);
       writeApplicationStartData(appId);
@@ -161,7 +164,7 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
       }
       writeApplicationFinishData(appId);
     }
-    return ahManager;
+    return historyClientService;
   }
 
   class MockApplicationHistoryManagerImpl extends ApplicationHistoryManagerImpl {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
index 41dda91..913b80d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -28,13 +28,11 @@ import java.util.Properties;
 import javax.servlet.FilterConfig;
 import javax.servlet.ServletException;
 import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.lib.StaticUserWebFilter.StaticUserFilter;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -44,7 +42,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerOnTimelineStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryManagerOnTimelineStore;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
@@ -79,7 +77,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 @RunWith(Parameterized.class)
 public class TestAHSWebServices extends JerseyTestBase {
 
-  private static ApplicationHistoryManagerOnTimelineStore historyManager;
+  private static ApplicationHistoryClientService historyClientService;
   private static final String[] USERS = new String[] { "foo" , "bar" };
 
   @BeforeClass
@@ -93,16 +91,23 @@ public class TestAHSWebServices extends JerseyTestBase {
     conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
     conf.set(YarnConfiguration.YARN_ADMIN_ACL, "foo");
     ApplicationACLsManager appAclsManager = new ApplicationACLsManager(conf);
-    historyManager =
+    ApplicationHistoryManagerOnTimelineStore historyManager =
         new ApplicationHistoryManagerOnTimelineStore(dataManager, appAclsManager);
     historyManager.init(conf);
-    historyManager.start();
+    historyClientService = new ApplicationHistoryClientService(historyManager) {
+      @Override
+      protected void serviceStart() throws Exception {
+        // Do Nothing
+      }
+    };
+    historyClientService.init(conf);
+    historyClientService.start();
   }
 
   @AfterClass
   public static void tearDownClass() throws Exception {
-    if (historyManager != null) {
-      historyManager.stop();
+    if (historyClientService != null) {
+      historyClientService.stop();
     }
   }
 
@@ -118,7 +123,7 @@ public class TestAHSWebServices extends JerseyTestBase {
       bind(JAXBContextResolver.class);
       bind(AHSWebServices.class);
       bind(GenericExceptionHandler.class);
-      bind(ApplicationContext.class).toInstance(historyManager);
+      bind(ApplicationBaseProtocol.class).toInstance(historyClientService);
       serve("/*").with(GuiceContainer.class);
       filter("/*").through(TestSimpleAuthFilter.class);
     }
@@ -372,5 +377,4 @@ public class TestAHSWebServices extends JerseyTestBase {
     assertEquals(ContainerState.COMPLETE.toString(),
       container.getString("containerState"));
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java
deleted file mode 100644
index 0e2ffdf..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.api;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-
-public interface ApplicationContext {
-  /**
-   * This method returns Application {@link ApplicationReport} for the specified
-   * {@link ApplicationId}.
-   * 
-   * @param appId
-   * 
-   * @return {@link ApplicationReport} for the ApplicationId.
-   * @throws YarnException
-   * @throws IOException
-   */
-  ApplicationReport getApplication(ApplicationId appId)
-      throws YarnException, IOException;
-
-  /**
-   * This method returns all Application {@link ApplicationReport}s
-   * 
-   * @return map of {@link ApplicationId} to {@link ApplicationReport}s.
-   * @throws YarnException
-   * @throws IOException
-   */
-  Map<ApplicationId, ApplicationReport> getAllApplications()
-      throws YarnException, IOException;
-
-  /**
-   * Application can have multiple application attempts
-   * {@link ApplicationAttemptReport}. This method returns the all
-   * {@link ApplicationAttemptReport}s for the Application.
-   * 
-   * @param appId
-   * 
-   * @return all {@link ApplicationAttemptReport}s for the Application.
-   * @throws YarnException
-   * @throws IOException
-   */
-  Map<ApplicationAttemptId, ApplicationAttemptReport> getApplicationAttempts(
-      ApplicationId appId) throws YarnException, IOException;
-
-  /**
-   * This method returns {@link ApplicationAttemptReport} for specified
-   * {@link ApplicationId}.
-   * 
-   * @param appAttemptId
-   *          {@link ApplicationAttemptId}
-   * @return {@link ApplicationAttemptReport} for ApplicationAttemptId
-   * @throws YarnException
-   * @throws IOException
-   */
-  ApplicationAttemptReport getApplicationAttempt(
-      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
-
-  /**
-   * This method returns {@link ContainerReport} for specified
-   * {@link ContainerId}.
-   * 
-   * @param containerId
-   *          {@link ContainerId}
-   * @return {@link ContainerReport} for ContainerId
-   * @throws YarnException
-   * @throws IOException
-   */
-  ContainerReport getContainer(ContainerId containerId)
-      throws YarnException, IOException;
-
-  /**
-   * This method returns {@link ContainerReport} for specified
-   * {@link ApplicationAttemptId}.
-   * 
-   * @param appAttemptId
-   *          {@link ApplicationAttemptId}
-   * @return {@link ContainerReport} for ApplicationAttemptId
-   * @throws YarnException
-   * @throws IOException
-   */
-  ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
-      throws YarnException, IOException;
-
-  /**
-   * This method returns Map of {@link ContainerId} to {@link ContainerReport}
-   * for specified {@link ApplicationAttemptId}.
-   * 
-   * @param appAttemptId
-   *          {@link ApplicationAttemptId}
-   * @return Map of {@link ContainerId} to {@link ContainerReport} for
-   *         ApplicationAttemptId
-   * @throws YarnException
-   * @throws IOException
-   */
-  Map<ContainerId, ContainerReport> getContainers(
-      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index 4a02892..ea33f4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -27,10 +27,14 @@ import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -45,11 +49,12 @@ import com.google.inject.Inject;
 public class AppAttemptBlock extends HtmlBlock {
 
   private static final Log LOG = LogFactory.getLog(AppAttemptBlock.class);
-  private final ApplicationContext appContext;
+  protected ApplicationBaseProtocol appBaseProt;
 
   @Inject
-  public AppAttemptBlock(ApplicationContext appContext) {
-    this.appContext = appContext;
+  public AppAttemptBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
+    super(ctx);
+    this.appBaseProt = appBaseProt;
   }
 
   @Override
@@ -68,18 +73,22 @@ public class AppAttemptBlock extends HtmlBlock {
       return;
     }
 
-    final ApplicationAttemptId appAttemptIdFinal = appAttemptId;
     UserGroupInformation callerUGI = getCallerUGI();
-    ApplicationAttemptReport appAttemptReport;
+    ApplicationAttemptReport appAttemptReport = null;
     try {
+      final GetApplicationAttemptReportRequest request =
+          GetApplicationAttemptReportRequest.newInstance(appAttemptId);
       if (callerUGI == null) {
-        appAttemptReport = appContext.getApplicationAttempt(appAttemptId);
+        appAttemptReport =
+            appBaseProt.getApplicationAttemptReport(request)
+              .getApplicationAttemptReport();
       } else {
         appAttemptReport = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationAttemptReport> () {
           @Override
           public ApplicationAttemptReport run() throws Exception {
-            return appContext.getApplicationAttempt(appAttemptIdFinal);
+            return appBaseProt.getApplicationAttemptReport(request)
+                .getApplicationAttemptReport();
           }
         });
       }
@@ -90,10 +99,35 @@ public class AppAttemptBlock extends HtmlBlock {
       html.p()._(message)._();
       return;
     }
+
     if (appAttemptReport == null) {
       puts("Application Attempt not found: " + attemptid);
       return;
     }
+
+    boolean exceptionWhenGetContainerReports = false;
+    Collection<ContainerReport> containers = null;
+    try {
+      final GetContainersRequest request =
+          GetContainersRequest.newInstance(appAttemptId);
+      if (callerUGI == null) {
+        containers = appBaseProt.getContainers(request).getContainerList();
+      } else {
+        containers = callerUGI.doAs(
+            new PrivilegedExceptionAction<Collection<ContainerReport>> () {
+          @Override
+          public Collection<ContainerReport> run() throws Exception {
+            return  appBaseProt.getContainers(request).getContainerList();
+          }
+        });
+      }
+    } catch (RuntimeException e) {
+      // separate RuntimeException catch to suppress the findbugs warning
+      exceptionWhenGetContainerReports = true;
+    } catch (Exception e) {
+      exceptionWhenGetContainerReports = true;
+    }
+
     AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport);
 
     setTitle(join("Application Attempt ", attemptid));
@@ -104,43 +138,35 @@ public class AppAttemptBlock extends HtmlBlock {
       node = appAttempt.getHost() + ":" + appAttempt.getRpcPort();
     }
     info("Application Attempt Overview")
-      ._("State", appAttempt.getAppAttemptState())
       ._(
-        "Master Container",
-        appAttempt.getAmContainerId() == null ? "#" : root_url("container",
-          appAttempt.getAmContainerId()),
+        "Application Attempt State:",
+        appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
+          .getAppAttemptState())
+      ._(
+        "AM Container:",
+        appAttempt.getAmContainerId() == null || containers == null
+            || !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
+            ? null : root_url("container", appAttempt.getAmContainerId()),
         String.valueOf(appAttempt.getAmContainerId()))
       ._("Node:", node)
       ._(
         "Tracking URL:",
-        appAttempt.getTrackingUrl() == null ? "#" : root_url(appAttempt
-          .getTrackingUrl()), "History")
-      ._("Diagnostics Info:", appAttempt.getDiagnosticsInfo());
+        appAttempt.getTrackingUrl() == null
+            || appAttempt.getTrackingUrl() == UNAVAILABLE ? null
+            : root_url(appAttempt.getTrackingUrl()),
+        appAttempt.getTrackingUrl() == null
+            || appAttempt.getTrackingUrl() == UNAVAILABLE
+            ? "Unassigned"
+            : appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FINISHED
+                || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED
+                || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED
+                ? "History" : "ApplicationMaster")
+      ._("Diagnostics Info:", appAttempt.getDiagnosticsInfo() == null ?
+          "" : appAttempt.getDiagnosticsInfo());
 
     html._(InfoBlock.class);
 
-    Collection<ContainerReport> containers;
-    try {
-      if (callerUGI == null) {
-        containers = appContext.getContainers(appAttemptId).values();
-      } else {
-        containers = callerUGI.doAs(
-            new PrivilegedExceptionAction<Collection<ContainerReport>> () {
-          @Override
-          public Collection<ContainerReport> run() throws Exception {
-            return  appContext.getContainers(appAttemptIdFinal).values();
-          }
-        });
-      }
-    } catch (RuntimeException e) {
-      // have this block to suppress the findbugs warning
-      html
-      .p()
-      ._(
-        "Sorry, Failed to get containers for application attempt" + attemptid
-            + ".")._();
-      return;
-    } catch (Exception e) {
+    if (exceptionWhenGetContainerReports) {
       html
         .p()
         ._(
@@ -166,11 +192,12 @@ public class AppAttemptBlock extends HtmlBlock {
         .append("'>")
         .append(container.getContainerId())
         .append("</a>\",\"<a href='")
-        .append(container.getAssignedNodeId())
+        .append("#") // TODO: replace with node http address (YARN-1884)
         .append("'>")
-        .append(
-          StringEscapeUtils.escapeJavaScript(StringEscapeUtils
-            .escapeHtml(container.getAssignedNodeId()))).append("</a>\",\"")
+        .append(container.getAssignedNodeId() == null ? "N/A" :
+            StringEscapeUtils.escapeJavaScript(StringEscapeUtils
+                .escapeHtml(container.getAssignedNodeId())))
+        .append("</a>\",\"")
         .append(container.getContainerExitStatus()).append("\",\"<a href='")
         .append(container.getLogUrl() == null ?
             "#" : container.getLogUrl()).append("'>")
@@ -187,4 +214,14 @@ public class AppAttemptBlock extends HtmlBlock {
 
     tbody._()._();
   }
+
+  private boolean hasAMContainer(ContainerId containerId,
+      Collection<ContainerReport> containers) {
+    for (ContainerReport container : containers) {
+      if (containerId.equals(container.getContainerId())) {
+        return true;
+      }
+    }
+    return false;
+  }
 }
\ No newline at end of file
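
The hunks above replace the old ApplicationContext lookups with ApplicationBaseProtocol request/response calls, optionally wrapped in the caller's UGI. A minimal stand-alone sketch of that fetch pattern follows; it only illustrates the API calls visible in the diff, and the class and variable names (AttemptReportFetcher, protocol, attemptId) are made up for the example.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;

public class AttemptReportFetcher {

  // Fetches an attempt report through ApplicationBaseProtocol, running the
  // call as the remote user when one is present (the same doAs pattern the
  // web blocks use).
  public static ApplicationAttemptReport fetch(
      final ApplicationBaseProtocol protocol,
      final ApplicationAttemptId attemptId,
      UserGroupInformation callerUGI) throws Exception {
    final GetApplicationAttemptReportRequest request =
        GetApplicationAttemptReportRequest.newInstance(attemptId);
    if (callerUGI == null) {
      return protocol.getApplicationAttemptReport(request)
          .getApplicationAttemptReport();
    }
    return callerUGI.doAs(
        new PrivilegedExceptionAction<ApplicationAttemptReport>() {
          @Override
          public ApplicationAttemptReport run() throws Exception {
            return protocol.getApplicationAttemptReport(request)
                .getApplicationAttemptReport();
          }
        });
  }
}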

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 8fa4086..2db88ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -20,24 +20,47 @@ package org.apache.hadoop.yarn.server.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
+import java.util.List;
 
 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
@@ -47,16 +70,20 @@ import com.google.inject.Inject;
 
 public class AppBlock extends HtmlBlock {
 
-  protected ApplicationContext appContext;
+  private static final Log LOG = LogFactory.getLog(AppBlock.class);
+  protected ApplicationBaseProtocol appBaseProt;
+  protected Configuration conf;
 
   @Inject
-  AppBlock(ApplicationContext appContext, ViewContext ctx) {
+  AppBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx, Configuration conf) {
     super(ctx);
-    this.appContext = appContext;
+    this.appBaseProt = appBaseProt;
+    this.conf = conf;
   }
 
   @Override
   protected void render(Block html) {
+    String webUiType = $(WEB_UI_TYPE);
     String aid = $(APPLICATION_ID);
     if (aid.isEmpty()) {
       puts("Bad request: requires Application ID");
@@ -71,18 +98,21 @@ public class AppBlock extends HtmlBlock {
       return;
     }
 
-    final ApplicationId appIDFinal = appID;
     UserGroupInformation callerUGI = getCallerUGI();
-    ApplicationReport appReport;
+    ApplicationReport appReport = null;
     try {
+      final GetApplicationReportRequest request =
+          GetApplicationReportRequest.newInstance(appID);
       if (callerUGI == null) {
-        appReport = appContext.getApplication(appID);
+        appReport =
+            appBaseProt.getApplicationReport(request).getApplicationReport();
       } else {
         appReport = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationReport> () {
           @Override
           public ApplicationReport run() throws Exception {
-            return appContext.getApplication(appIDFinal);
+            return appBaseProt.getApplicationReport(request)
+                .getApplicationReport();
           }
         });
       }
@@ -92,41 +122,90 @@ public class AppBlock extends HtmlBlock {
       html.p()._(message)._();
       return;
     }
+
     if (appReport == null) {
       puts("Application not found: " + aid);
       return;
     }
+
     AppInfo app = new AppInfo(appReport);
 
     setTitle(join("Application ", aid));
 
+    if (webUiType != null
+        && webUiType.equals(YarnWebParams.RM_WEB_UI)
+        && conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
+          YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED)) {
+      // Application Kill
+      html.div()
+        .button()
+          .$onclick("confirmAction()").b("Kill Application")._()
+          ._();
+
+      StringBuilder script = new StringBuilder();
+      script.append("function confirmAction() {")
+          .append(" b = confirm(\"Are you sure?\");")
+          .append(" if (b == true) {")
+          .append(" $.ajax({")
+          .append(" type: 'PUT',")
+          .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
+          .append(" contentType: 'application/json',")
+          .append(" data: '{\"state\":\"KILLED\"}',")
+          .append(" dataType: 'json'")
+          .append(" }).done(function(data){")
+          .append(" setTimeout(function(){")
+          .append(" location.href = '/cluster/app/").append(aid).append("';")
+          .append(" }, 1000);")
+          .append(" }).fail(function(data){")
+          .append(" console.log(data);")
+          .append(" });")
+          .append(" }")
+          .append("}");
+
+      html.script().$type("text/javascript")._(script.toString())._();
+    }
+
     info("Application Overview")
       ._("User:", app.getUser())
       ._("Name:", app.getName())
       ._("Application Type:", app.getType())
-      ._("State:", app.getAppState())
-      ._("FinalStatus:", app.getFinalAppStatus())
+      ._("Application Tags:",
+        app.getApplicationTags() == null ? "" : app.getApplicationTags())
+      ._("YarnApplicationState:",
+        app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
+          .getAppState()))
+      ._("FinalStatus Reported by AM:",
+        clarifyAppFinalStatus(app.getFinalAppStatus()))
       ._("Started:", Times.format(app.getStartedTime()))
       ._(
         "Elapsed:",
         StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
           app.getFinishedTime())))
       ._("Tracking URL:",
-        app.getTrackingUrl() == null ? "#" : root_url(app.getTrackingUrl()),
-        "History")._("Diagnostics:", app.getDiagnosticsInfo());
-
-    html._(InfoBlock.class);
+        app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+            ? null : root_url(app.getTrackingUrl()),
+        app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+            ? "Unassigned" : app.getAppState() == YarnApplicationState.FINISHED
+                || app.getAppState() == YarnApplicationState.FAILED
+                || app.getAppState() == YarnApplicationState.KILLED ? "History"
+                : "ApplicationMaster")
+      ._("Diagnostics:",
+          app.getDiagnosticsInfo() == null ? "" : app.getDiagnosticsInfo());
 
     Collection<ApplicationAttemptReport> attempts;
     try {
+      final GetApplicationAttemptsRequest request =
+          GetApplicationAttemptsRequest.newInstance(appID);
       if (callerUGI == null) {
-        attempts = appContext.getApplicationAttempts(appID).values();
+        attempts = appBaseProt.getApplicationAttempts(request)
+            .getApplicationAttemptList();
       } else {
         attempts = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
           @Override
           public Collection<ApplicationAttemptReport> run() throws Exception {
-            return appContext.getApplicationAttempts(appIDFinal).values();
+            return appBaseProt.getApplicationAttempts(request)
+                .getApplicationAttemptList();
           }
         });
       }
@@ -138,6 +217,34 @@ public class AppBlock extends HtmlBlock {
       return;
     }
 
+    //TODO:YARN-3284
+    //The preemption metrics will be exposed from ApplicationReport
+    // and ApplicationAttemptReport
+    ApplicationResourceUsageReport usageReport =
+        appReport.getApplicationResourceUsageReport();
+    DIV<Hamlet> pdiv = html.
+        _(InfoBlock.class).
+        div(_INFO_WRAP);
+    info("Application Overview").clear();
+    info("Application Metrics")
+        ._("Total Resource Preempted:",
+          Resources.none()) // TODO: YARN-3284
+        ._("Total Number of Non-AM Containers Preempted:",
+          String.valueOf(0)) // TODO: YARN-3284
+        ._("Total Number of AM Containers Preempted:",
+          String.valueOf(0)) // TODO: YARN-3284
+        ._("Resource Preempted from Current Attempt:",
+          Resources.none()) // TODO: YARN-3284
+        ._("Number of Non-AM Containers Preempted from Current Attempt:",
+          0) // TODO: YARN-3284
+        ._("Aggregate Resource Allocation:",
+          String.format("%d MB-seconds, %d vcore-seconds", usageReport == null
+            ? 0 : usageReport.getMemorySeconds(), usageReport == null ? 0
+            : usageReport.getVcoreSeconds()));
+    pdiv._();
+
+    html._(InfoBlock.class);
+
     // Application Attempt Table
     TBODY<TABLE<Hamlet>> tbody =
         html.table("#attempts").thead().tr().th(".id", "Attempt ID")
@@ -147,18 +254,28 @@ public class AppBlock extends HtmlBlock {
     StringBuilder attemptsTableData = new StringBuilder("[\n");
     for (final ApplicationAttemptReport appAttemptReport : attempts) {
       AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport);
-      ContainerReport containerReport;
+      ContainerReport containerReport = null;
       try {
+        // AM container is always the first container of the attempt
+        final GetContainerReportRequest request =
+            GetContainerReportRequest.newInstance(ContainerId.newContainerId(
+              appAttemptReport.getApplicationAttemptId(), 1));
         if (callerUGI == null) {
-          containerReport = appContext.getAMContainer(appAttemptReport
-              .getApplicationAttemptId());
+          containerReport =
+              appBaseProt.getContainerReport(request).getContainerReport();
         } else {
           containerReport = callerUGI.doAs(
               new PrivilegedExceptionAction<ContainerReport> () {
             @Override
             public ContainerReport run() throws Exception {
-              return appContext.getAMContainer(appAttemptReport
-                  .getApplicationAttemptId());
+              ContainerReport report = null;
+              try {
+                report = appBaseProt.getContainerReport(request)
+                    .getContainerReport();
+              } catch (ContainerNotFoundException ex) {
+                LOG.warn(ex.getMessage());
+              }
+              return report;
             }
           });
         }
@@ -170,7 +287,7 @@ public class AppBlock extends HtmlBlock {
         html.p()._(message)._();
         return;
       }
-      long startTime = Long.MAX_VALUE;
+      long startTime = 0L;
       String logsLink = null;
       if (containerReport != null) {
         ContainerInfo container = new ContainerInfo(containerReport);
@@ -192,14 +309,12 @@ public class AppBlock extends HtmlBlock {
         .append("</a>\",\"")
         .append(startTime)
         .append("\",\"<a href='")
-        .append(
-          nodeLink == null ? "#" : url("//", nodeLink))
+        .append("#") // TODO: replace with node http address (YARN-1884)
         .append("'>")
-        .append(
-          nodeLink == null ? "N/A" : StringEscapeUtils
+        .append(nodeLink == null ? "N/A" : StringEscapeUtils
             .escapeJavaScript(StringEscapeUtils.escapeHtml(nodeLink)))
-        .append("</a>\",\"<a href='")
-        .append(logsLink == null ? "#" : logsLink).append("'>")
+        .append("</a>\",\"<a ")
+        .append(logsLink == null ? "#" : "href='" + logsLink).append("'>")
         .append(logsLink == null ? "N/A" : "Logs").append("</a>\"],\n");
     }
     if (attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') {
@@ -211,5 +326,108 @@ public class AppBlock extends HtmlBlock {
       ._("var attemptsTableData=" + attemptsTableData)._();
 
     tbody._()._();
+
+    createContainerLocalityTable(html); //TODO:YARN-3284
+    createResourceRequestsTable(html, null); //TODO:YARN-3284
+  }
+
+  //TODO: YARN-3284
+  //The containerLocality metrics will be exposed from AttemptReport
+  private void createContainerLocalityTable(Block html) {
+    int totalAllocatedContainers = 0; //TODO: YARN-3284
+    int[][] localityStatistics = new int[0][0];//TODO:YARN-3284
+    DIV<Hamlet> div = html.div(_INFO_WRAP);
+    TABLE<DIV<Hamlet>> table =
+        div.h3(
+          "Total Allocated Containers: "
+              + totalAllocatedContainers).h3("Each table cell"
+            + " represents the number of NodeLocal/RackLocal/OffSwitch containers"
+            + " satisfied by NodeLocal/RackLocal/OffSwitch resource requests.").table(
+          "#containerLocality");
+    table.
+      tr().
+        th(_TH, "").
+        th(_TH, "Node Local Request").
+        th(_TH, "Rack Local Request").
+        th(_TH, "Off Switch Request").
+      _();
+
+    String[] containersType =
+        { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
+            "Num Off Switch Containers (satisfied by)" };
+    boolean odd = false;
+    for (int i = 0; i < localityStatistics.length; i++) {
+      table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
+        .td(String.valueOf(localityStatistics[i][0]))
+        .td(i == 0 ? "" : String.valueOf(localityStatistics[i][1]))
+        .td(i <= 1 ? "" : String.valueOf(localityStatistics[i][2]))._();
+    }
+    table._();
+    div._();
+  }
+
+  //TODO:YARN-3284
+  //The resource requests metrics will be exposed from attemptReport
+  private void createResourceRequestsTable(Block html, List<ResourceRequest> resourceRequests) {
+    TBODY<TABLE<Hamlet>> tbody =
+        html.table("#ResourceRequests").thead().tr()
+          .th(".priority", "Priority")
+          .th(".resourceName", "ResourceName")
+          .th(".totalResource", "Capability")
+          .th(".numContainers", "NumContainers")
+          .th(".relaxLocality", "RelaxLocality")
+          .th(".nodeLabelExpression", "NodeLabelExpression")._()._().tbody();
+
+    Resource totalResource = Resource.newInstance(0, 0);
+    if (resourceRequests != null) {
+      for (ResourceRequest request : resourceRequests) {
+        if (request.getNumContainers() == 0) {
+          continue;
+        }
+
+        tbody.tr()
+          .td(String.valueOf(request.getPriority()))
+          .td(request.getResourceName())
+          .td(String.valueOf(request.getCapability()))
+          .td(String.valueOf(request.getNumContainers()))
+          .td(String.valueOf(request.getRelaxLocality()))
+          .td(request.getNodeLabelExpression() == null ? "N/A" : request
+              .getNodeLabelExpression())._();
+        if (request.getResourceName().equals(ResourceRequest.ANY)) {
+          Resources.addTo(totalResource,
+            Resources.multiply(request.getCapability(),
+              request.getNumContainers()));
+        }
+      }
+    }
+    html.div().$class("totalResourceRequests")
+      .h3("Total Outstanding Resource Requests: " + totalResource)._();
+    tbody._()._();
+  }
+
+  private String clarifyAppState(YarnApplicationState state) {
+    String ret = state.toString();
+    switch (state) {
+    case NEW:
+      return ret + ": waiting for application to be initialized";
+    case NEW_SAVING:
+      return ret + ": waiting for application to be persisted in state-store.";
+    case SUBMITTED:
+      return ret + ": waiting for application to be accepted by scheduler.";
+    case ACCEPTED:
+      return ret + ": waiting for AM container to be allocated, launched and"
+          + " register with RM.";
+    case RUNNING:
+      return ret + ": AM has registered with RM and started running.";
+    default:
+      return ret;
+    }
+  }
+
+  private String clarifyAppFinalStatus(FinalApplicationStatus status) {
+    if (status == FinalApplicationStatus.UNDEFINED) {
+      return "Application has not completed yet.";
+    }
+    return status.toString();
   }
 }
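
The confirmAction() script added above issues the application kill through the RM web services REST API: a PUT to /ws/v1/cluster/apps/<appid>/state with the JSON body {"state":"KILLED"}. For reference, a hedged Java sketch of the same call using only the JDK follows; the RM host, port and application id are placeholders, while the endpoint and body are taken from the script in the diff.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class KillAppViaRest {
  public static void main(String[] args) throws Exception {
    // Placeholder RM web address and application id; substitute real values.
    String appId = "application_1426205694339_0001";
    URL url =
        new URL("http://rm-host:8088/ws/v1/cluster/apps/" + appId + "/state");

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);

    // Same JSON body the embedded confirmAction() script sends.
    byte[] body = "{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body);
    }
    System.out.println("RM responded with HTTP " + conn.getResponseCode());
  }
}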

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
index f341cf6..161486d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
@@ -25,13 +25,16 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
-import java.util.HashSet;
+import java.util.EnumSet;
 
 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
@@ -42,12 +45,13 @@ import com.google.inject.Inject;
 
 public class AppsBlock extends HtmlBlock {
 
-  protected ApplicationContext appContext;
+  private static final Log LOG = LogFactory.getLog(AppsBlock.class);
+  protected ApplicationBaseProtocol appBaseProt;
 
   @Inject
-  AppsBlock(ApplicationContext appContext, ViewContext ctx) {
+  AppsBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
     super(ctx);
-    this.appContext = appContext;
+    this.appBaseProt = appBaseProt;
   }
 
   @Override
@@ -61,27 +65,29 @@ public class AppsBlock extends HtmlBlock {
           .th(".finishtime", "FinishTime").th(".state", "State")
           .th(".finalstatus", "FinalStatus").th(".progress", "Progress")
           .th(".ui", "Tracking UI")._()._().tbody();
-    Collection<YarnApplicationState> reqAppStates = null;
+    EnumSet<YarnApplicationState> reqAppStates =
+        EnumSet.noneOf(YarnApplicationState.class);
     String reqStateString = $(APP_STATE);
     if (reqStateString != null && !reqStateString.isEmpty()) {
       String[] appStateStrings = reqStateString.split(",");
-      reqAppStates = new HashSet<YarnApplicationState>(appStateStrings.length);
       for (String stateString : appStateStrings) {
-        reqAppStates.add(YarnApplicationState.valueOf(stateString));
+        reqAppStates.add(YarnApplicationState.valueOf(stateString.trim()));
       }
     }
 
     UserGroupInformation callerUGI = getCallerUGI();
-    Collection<ApplicationReport> appReports;
+    Collection<ApplicationReport> appReports = null;
     try {
+      final GetApplicationsRequest request =
+          GetApplicationsRequest.newInstance(reqAppStates);
       if (callerUGI == null) {
-        appReports = appContext.getAllApplications().values();
+        appReports = appBaseProt.getApplications(request).getApplicationList();
       } else {
         appReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationReport>> () {
           @Override
           public Collection<ApplicationReport> run() throws Exception {
-            return appContext.getAllApplications().values();
+            return appBaseProt.getApplications(request).getApplicationList();
           }
         });
       }
@@ -93,12 +99,15 @@ public class AppsBlock extends HtmlBlock {
     }
     StringBuilder appsTableData = new StringBuilder("[\n");
     for (ApplicationReport appReport : appReports) {
-      if (reqAppStates != null
+      // TODO: remove the following condition. It is still here because
+      // the history-side implementation of ApplicationBaseProtocol
+      // does not have filtering capability yet (YARN-1819).
+      if (!reqAppStates.isEmpty()
           && !reqAppStates.contains(appReport.getYarnApplicationState())) {
         continue;
       }
       AppInfo app = new AppInfo(appReport);
-      String percent = String.format("%.1f", app.getProgress() * 100.0F);
+      String percent = String.format("%.1f", app.getProgress());
       // AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js
       appsTableData
         .append("[\"<a href='")
@@ -123,7 +132,7 @@ public class AppsBlock extends HtmlBlock {
             .getQueue()))).append("\",\"").append(app.getStartedTime())
         .append("\",\"").append(app.getFinishedTime())
         .append("\",\"")
-        .append(app.getAppState())
+        .append(app.getAppState() == null ? UNAVAILABLE : app.getAppState())
         .append("\",\"")
         .append(app.getFinalAppStatus())
         .append("\",\"")
@@ -132,13 +141,21 @@ public class AppsBlock extends HtmlBlock {
         .append(C_PROGRESSBAR).append("' title='").append(join(percent, '%'))
         .append("'> ").append("<div class='").append(C_PROGRESSBAR_VALUE)
         .append("' style='").append(join("width:", percent, '%'))
-        .append("'> </div> </div>").append("\",\"<a href='");
+        .append("'> </div> </div>").append("\",\"<a ");
 
       String trackingURL =
-          app.getTrackingUrl() == null ? "#" : app.getTrackingUrl();
+          app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+              ? null : app.getTrackingUrl();
 
-      appsTableData.append(trackingURL).append("'>").append("History")
-        .append("</a>\"],\n");
+      String trackingUI =
+          app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+              ? "Unassigned"
+              : app.getAppState() == YarnApplicationState.FINISHED
+                  || app.getAppState() == YarnApplicationState.FAILED
+                  || app.getAppState() == YarnApplicationState.KILLED
+                  ? "History" : "ApplicationMaster";
+      appsTableData.append(trackingURL == null ? "#" : "href='" + trackingURL)
+        .append("'>").append(trackingUI).append("</a>\"],\n");
 
     }
     if (appsTableData.charAt(appsTableData.length() - 2) == ',') {
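
The AppsBlock change above switches the requested-state filter from a HashSet to an EnumSet and hands it directly to GetApplicationsRequest. A small illustrative sketch of that parsing and request construction follows; the reqStateString value is a made-up example (in the block it comes from $(APP_STATE)).

import java.util.EnumSet;

import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;

public class StateFilterSketch {
  public static void main(String[] args) {
    // Example query value; spaces after commas are tolerated via trim().
    String reqStateString = "RUNNING, FINISHED";

    EnumSet<YarnApplicationState> reqAppStates =
        EnumSet.noneOf(YarnApplicationState.class);
    if (reqStateString != null && !reqStateString.isEmpty()) {
      for (String stateString : reqStateString.split(",")) {
        reqAppStates.add(YarnApplicationState.valueOf(stateString.trim()));
      }
    }

    // An empty set leaves the state filter unset, i.e. all applications.
    GetApplicationsRequest request =
        GetApplicationsRequest.newInstance(reqAppStates);
    System.out.println("Requested states: " + reqAppStates
        + ", request built: " + (request != null));
  }
}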

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
index 2bb48a8..ed50c7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
@@ -26,9 +26,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
@@ -40,12 +41,12 @@ import com.google.inject.Inject;
 public class ContainerBlock extends HtmlBlock {
 
   private static final Log LOG = LogFactory.getLog(ContainerBlock.class);
-  private final ApplicationContext appContext;
+  protected ApplicationBaseProtocol appBaseProt;
 
   @Inject
-  public ContainerBlock(ApplicationContext appContext, ViewContext ctx) {
+  public ContainerBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
     super(ctx);
-    this.appContext = appContext;
+    this.appBaseProt = appBaseProt;
   }
 
   @Override
@@ -64,18 +65,21 @@ public class ContainerBlock extends HtmlBlock {
       return;
     }
 
-    final ContainerId containerIdFinal = containerId;
     UserGroupInformation callerUGI = getCallerUGI();
-    ContainerReport containerReport;
+    ContainerReport containerReport = null;
     try {
+      final GetContainerReportRequest request =
+          GetContainerReportRequest.newInstance(containerId);
       if (callerUGI == null) {
-        containerReport = appContext.getContainer(containerId);
+        containerReport = appBaseProt.getContainerReport(request)
+            .getContainerReport();
       } else {
         containerReport = callerUGI.doAs(
             new PrivilegedExceptionAction<ContainerReport> () {
           @Override
           public ContainerReport run() throws Exception {
-            return appContext.getContainer(containerIdFinal);
+            return appBaseProt.getContainerReport(request)
+                .getContainerReport();
           }
         });
       }
@@ -85,6 +89,7 @@ public class ContainerBlock extends HtmlBlock {
       html.p()._(message)._();
       return;
     }
+
     if (containerReport == null) {
       puts("Container not found: " + containerid);
       return;
@@ -94,7 +99,10 @@ public class ContainerBlock extends HtmlBlock {
     setTitle(join("Container ", containerid));
 
     info("Container Overview")
-      ._("State:", container.getContainerState())
+      ._(
+        "Container State:",
+        container.getContainerState() == null ? UNAVAILABLE : container
+          .getContainerState())
       ._("Exit Status:", container.getContainerExitStatus())
       ._("Node:", container.getAssignedNodeId())
       ._("Priority:", container.getPriority())
@@ -109,7 +117,8 @@ public class ContainerBlock extends HtmlBlock {
             + container.getAllocatedVCores() + " VCores")
       ._("Logs:", container.getLogUrl() == null ? "#" : container.getLogUrl(),
           container.getLogUrl() == null ? "N/A" : "Logs")
-      ._("Diagnostics:", container.getDiagnosticsInfo());
+      ._("Diagnostics:", container.getDiagnosticsInfo() == null ?
+          "" : container.getDiagnosticsInfo());
 
     html._(InfoBlock.class);
   }
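
ContainerBlock resolves the container id from the request parameter (ConverterUtils is imported above) before building the GetContainerReportRequest. A tiny hedged sketch of that id round trip follows; the container id string is a made-up example in the standard container_<clusterTimestamp>_<appId>_<attemptId>_<containerId> form.

import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;

public class ContainerIdSketch {
  public static void main(String[] args) {
    // Made-up id; a real one comes from the page's container id parameter.
    String containerid = "container_1426205694339_0001_01_000001";

    // Parse the string form back into a ContainerId record.
    ContainerId containerId = ConverterUtils.toContainerId(containerid);

    // The block wraps this id in a request for ApplicationBaseProtocol.
    GetContainerReportRequest request =
        GetContainerReportRequest.newInstance(containerId);
    System.out.println("Parsed " + containerId
        + ", request ready: " + (request != null));
  }
}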

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
new file mode 100644
index 0000000..384a976
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+
+public class WebPageUtils {
+
+  public static String appsTableInit() {
+    return appsTableInit(false);
+  }
+
+  public static String appsTableInit(boolean isFairSchedulerPage) {
+    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
+    // FairSchedulerPage's table is a bit different
+    return tableInit()
+      .append(", 'aaData': appsTableData")
+      .append(", bDeferRender: true")
+      .append(", bProcessing: true")
+      .append("\n, aoColumnDefs: ")
+      .append(getAppsTableColumnDefs(isFairSchedulerPage))
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  private static String getAppsTableColumnDefs(boolean isFairSchedulerPage) {
+    StringBuilder sb = new StringBuilder();
+    return sb
+      .append("[\n")
+      .append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+      .append("\n, {'sType':'numeric', 'aTargets': " +
+          (isFairSchedulerPage ? "[6, 7]": "[5, 6]"))
+      .append(", 'mRender': renderHadoopDate }")
+      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
+      .append(", 'mRender': parseHadoopProgress }]").toString();
+  }
+
+  public static String attemptsTableInit() {
+    return tableInit().append(", 'aaData': attemptsTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+      .append("\n, aoColumnDefs: ").append(getAttemptsTableColumnDefs())
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  private static String getAttemptsTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+      .append("\n, {'sType':'numeric', 'aTargets': [1]")
+      .append(", 'mRender': renderHadoopDate }]").toString();
+  }
+
+  public static String containersTableInit() {
+    return tableInit().append(", 'aaData': containersTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+      .append("\n, aoColumnDefs: ").append(getContainersTableColumnDefs())
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  private static String getContainersTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }]").toString();
+  }
+
+}
\ No newline at end of file
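
WebPageUtils above centralizes the jQuery DataTables init strings that the web pages feed into initID(DATATABLES, ...); the AppAttemptPage diff further down does exactly that for its containers table. A trivial sketch that just prints the generated options strings for inspection (nothing here beyond the public methods added above):

import org.apache.hadoop.yarn.server.webapp.WebPageUtils;

public class PrintTableInit {
  public static void main(String[] args) {
    // The options blob the containers table is driven by; pages pass this
    // string to initID(DATATABLES, "containers").
    System.out.println(WebPageUtils.containersTableInit());
    // The apps table variant differs only in its column definitions.
    System.out.println(WebPageUtils.appsTableInit());
  }
}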

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index 6d94737..909bf1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -40,7 +40,13 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
@@ -54,10 +60,10 @@ import org.apache.hadoop.yarn.webapp.NotFoundException;
 
 public class WebServices {
 
-  protected ApplicationContext appContext;
+  protected ApplicationBaseProtocol appBaseProt;
 
-  public WebServices(ApplicationContext appContext) {
-    this.appContext = appContext;
+  public WebServices(ApplicationBaseProtocol appBaseProt) {
+    this.appBaseProt = appBaseProt;
   }
 
   public AppsInfo getApps(HttpServletRequest req, HttpServletResponse res,
@@ -144,13 +150,17 @@ public class WebServices {
     Collection<ApplicationReport> appReports = null;
     try {
       if (callerUGI == null) {
-        appReports = appContext.getAllApplications().values();
+        // TODO: the request should take the same params as RMWebServices
+        // does (YARN-1819).
+        GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+        appReports = appBaseProt.getApplications(request).getApplicationList();
       } else {
         appReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationReport>> () {
           @Override
           public Collection<ApplicationReport> run() throws Exception {
-            return appContext.getAllApplications().values();
+            return appBaseProt.getApplications(
+                GetApplicationsRequest.newInstance()).getApplicationList();
           }
         });
       }
@@ -214,13 +224,17 @@ public class WebServices {
     ApplicationReport app = null;
     try {
       if (callerUGI == null) {
-        app = appContext.getApplication(id);
+        GetApplicationReportRequest request =
+            GetApplicationReportRequest.newInstance(id);
+        app = appBaseProt.getApplicationReport(request).getApplicationReport();
       } else {
         app = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationReport> () {
           @Override
           public ApplicationReport run() throws Exception {
-            return appContext.getApplication(id);
+            GetApplicationReportRequest request =
+                GetApplicationReportRequest.newInstance(id);
+            return appBaseProt.getApplicationReport(request).getApplicationReport();
           }
         });
       }
@@ -240,13 +254,20 @@ public class WebServices {
     Collection<ApplicationAttemptReport> appAttemptReports = null;
     try {
       if (callerUGI == null) {
-        appAttemptReports = appContext.getApplicationAttempts(id).values();
+        GetApplicationAttemptsRequest request =
+            GetApplicationAttemptsRequest.newInstance(id);
+        appAttemptReports =
+            appBaseProt.getApplicationAttempts(request)
+              .getApplicationAttemptList();
       } else {
         appAttemptReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
           @Override
           public Collection<ApplicationAttemptReport> run() throws Exception {
-            return appContext.getApplicationAttempts(id).values();
+            GetApplicationAttemptsRequest request =
+                GetApplicationAttemptsRequest.newInstance(id);
+            return appBaseProt.getApplicationAttempts(request)
+                  .getApplicationAttemptList();
           }
         });
       }
@@ -271,13 +292,20 @@ public class WebServices {
     ApplicationAttemptReport appAttempt = null;
     try {
       if (callerUGI == null) {
-        appAttempt = appContext.getApplicationAttempt(aaid);
+        GetApplicationAttemptReportRequest request =
+            GetApplicationAttemptReportRequest.newInstance(aaid);
+        appAttempt =
+            appBaseProt.getApplicationAttemptReport(request)
+              .getApplicationAttemptReport();
       } else {
         appAttempt = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationAttemptReport> () {
           @Override
           public ApplicationAttemptReport run() throws Exception {
-            return appContext.getApplicationAttempt(aaid);
+            GetApplicationAttemptReportRequest request =
+                GetApplicationAttemptReportRequest.newInstance(aaid);
+            return appBaseProt.getApplicationAttemptReport(request)
+                  .getApplicationAttemptReport();
           }
         });
       }
@@ -300,13 +328,16 @@ public class WebServices {
     Collection<ContainerReport> containerReports = null;
     try {
       if (callerUGI == null) {
-        containerReports = appContext.getContainers(aaid).values();
+        GetContainersRequest request = GetContainersRequest.newInstance(aaid);
+        containerReports =
+            appBaseProt.getContainers(request).getContainerList();
       } else {
         containerReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ContainerReport>> () {
           @Override
           public Collection<ContainerReport> run() throws Exception {
-            return appContext.getContainers(aaid).values();
+            GetContainersRequest request = GetContainersRequest.newInstance(aaid);
+            return appBaseProt.getContainers(request).getContainerList();
           }
         });
       }
@@ -332,13 +363,18 @@ public class WebServices {
     ContainerReport container = null;
     try {
       if (callerUGI == null) {
-        container = appContext.getContainer(cid);
+        GetContainerReportRequest request =
+            GetContainerReportRequest.newInstance(cid);
+        container =
+            appBaseProt.getContainerReport(request).getContainerReport();
       } else {
         container = callerUGI.doAs(
             new PrivilegedExceptionAction<ContainerReport> () {
           @Override
           public ContainerReport run() throws Exception {
-            return appContext.getContainer(cid);
+            GetContainerReportRequest request =
+                GetContainerReportRequest.newInstance(cid);
+            return appBaseProt.getContainerReport(request).getContainerReport();
           }
         });
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index d78f928..e8b1acc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.webapp.dao;
 
+import static org.apache.hadoop.yarn.util.StringHelper.CSV_JOINER;
+
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
@@ -49,6 +51,7 @@ public class AppInfo {
   protected long startedTime;
   protected long finishedTime;
   protected long elapsedTime;
+  protected String applicationTags;
 
   public AppInfo() {
     // JAXB needs this
@@ -74,7 +77,10 @@ public class AppInfo {
     finishedTime = app.getFinishTime();
     elapsedTime = Times.elapsed(startedTime, finishedTime);
     finalAppStatus = app.getFinalApplicationStatus();
-    progress = app.getProgress();
+    progress = app.getProgress() * 100; // in percent
+    if (app.getApplicationTags() != null && !app.getApplicationTags().isEmpty()) {
+      this.applicationTags = CSV_JOINER.join(app.getApplicationTags());
+    }
   }
 
   public String getAppId() {
@@ -149,4 +155,7 @@ public class AppInfo {
     return elapsedTime;
   }
 
+  public String getApplicationTags() {
+    return applicationTags;
+  }
 }
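
AppInfo now stores progress as a percentage (getProgress() * 100), which is why the AppsBlock hunk earlier formats the value without its own * 100.0F. A one-line sanity sketch of that formatting, using a made-up report value (ApplicationReport#getProgress() returns a fraction between 0.0 and 1.0):

public class ProgressFormatSketch {
  public static void main(String[] args) {
    float reportProgress = 0.375f;          // as returned by the report
    float progress = reportProgress * 100;  // what AppInfo now stores
    // AppsBlock formats the stored value directly, no further scaling.
    System.out.println(String.format("%.1f", progress) + "%");  // 37.5%
  }
}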

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
new file mode 100644
index 0000000..92eae48
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+
+import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+
+public class AppAttemptPage extends RmView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID);
+    set(
+      TITLE,
+      appAttemptId.isEmpty() ? "Bad request: missing application attempt ID"
+          : join("Application Attempt ",
+            $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
+
+    set(DATATABLES_ID, "containers");
+    set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
+    setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppAttemptBlock.class;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
deleted file mode 100644
index 00508b8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
-
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.QueueACL;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.util.Apps;
-import org.apache.hadoop.yarn.util.Times;
-import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-import org.apache.hadoop.yarn.webapp.view.InfoBlock;
-
-import com.google.inject.Inject;
-
-public class AppBlock extends HtmlBlock {
-
-  private final Configuration conf;
-  private final ResourceManager rm;
-  private final boolean rmWebAppUIActions;
-
-  @Inject
-  AppBlock(ResourceManager rm, ViewContext ctx, Configuration conf) {
-    super(ctx);
-    this.conf = conf;
-    this.rm = rm;
-    this.rmWebAppUIActions =
-        conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
-                YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED);
-  }
-
-  @Override
-  protected void render(Block html) {
-    String aid = $(APPLICATION_ID);
-    if (aid.isEmpty()) {
-      puts("Bad request: requires application ID");
-      return;
-    }
-
-    ApplicationId appID = null;
-    try {
-      appID = Apps.toAppID(aid);
-    } catch (Exception e) {
-      puts("Invalid Application ID: " + aid);
-      return;
-    }
-
-    RMContext context = this.rm.getRMContext();
-    RMApp rmApp = context.getRMApps().get(appID);
-    if (rmApp == null) {
-      puts("Application not found: "+ aid);
-      return;
-    }
-    AppInfo app =
-        new AppInfo(rm, rmApp, true, WebAppUtils.getHttpSchemePrefix(conf));
-
-    // Check for the authorization.
-    String remoteUser = request().getRemoteUser();
-    UserGroupInformation callerUGI = null;
-    if (remoteUser != null) {
-      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
-    }
-    if (callerUGI != null
-        && !(this.rm.getApplicationACLsManager().checkAccess(callerUGI,
-            ApplicationAccessType.VIEW_APP, app.getUser(), appID) || this.rm
-            .getQueueACLsManager().checkAccess(callerUGI,
-                QueueACL.ADMINISTER_QUEUE, app.getQueue()))) {
-      puts("You (User " + remoteUser
-          + ") are not authorized to view application " + appID);
-      return;
-    }
-
-    setTitle(join("Application ", aid));
-
-    if (rmWebAppUIActions) {
-      // Application Kill
-      html.div()
-        .button()
-          .$onclick("confirmAction()").b("Kill Application")._()
-          ._();
-
-      StringBuilder script = new StringBuilder();
-      script.append("function confirmAction() {")
-          .append(" b = confirm(\"Are you sure?\");")
-          .append(" if (b == true) {")
-          .append(" $.ajax({")
-          .append(" type: 'PUT',")
-          .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
-          .append(" contentType: 'application/json',")
-          .append(" data: '{\"state\":\"KILLED\"}',")
-          .append(" dataType: 'json'")
-          .append(" }).done(function(data){")
-          .append(" setTimeout(function(){")
-          .append(" location.href = '/cluster/app/").append(aid).append("';")
-          .append(" }, 1000);")
-          .append(" }).fail(function(data){")
-          .append(" console.log(data);")
-          .append(" });")
-          .append(" }")
-          .append("}");
-
-      html.script().$type("text/javascript")._(script.toString())._();
-    }
-
-    RMAppMetrics appMerics = rmApp.getRMAppMetrics();
-    
-    // Get attempt metrics and fields, it is possible currentAttempt of RMApp is
-    // null. In that case, we will assume resource preempted and number of Non
-    // AM container preempted on that attempt is 0
-    RMAppAttemptMetrics attemptMetrics;
-    if (null == rmApp.getCurrentAppAttempt()) {
-      attemptMetrics = null;
-    } else {
-      attemptMetrics = rmApp.getCurrentAppAttempt().getRMAppAttemptMetrics();
-    }
-    Resource attemptResourcePreempted =
-        attemptMetrics == null ? Resources.none() : attemptMetrics
-            .getResourcePreempted();
-    int attemptNumNonAMContainerPreempted =
-        attemptMetrics == null ? 0 : attemptMetrics
-            .getNumNonAMContainersPreempted();
-    
-    info("Application Overview")
-        ._("User:", app.getUser())
-        ._("Name:", app.getName())
-        ._("Application Type:", app.getApplicationType())
-        ._("Application Tags:", app.getApplicationTags())
-        ._("YarnApplicationState:", clarifyAppState(app.getState()))
-        ._("FinalStatus Reported by AM:",
-          clairfyAppFinalStatus(app.getFinalStatus()))
-        ._("Started:", Times.format(app.getStartTime()))
-        ._("Elapsed:",
-            StringUtils.formatTime(Times.elapsed(app.getStartTime(),
-                app.getFinishTime())))
-        ._("Tracking URL:",
-            !app.isTrackingUrlReady() ? "#" : app.getTrackingUrlPretty(),
-            app.getTrackingUI())
-        ._("Diagnostics:", app.getNote());
-
-    DIV<Hamlet> pdiv = html.
-        _(InfoBlock.class).
-        div(_INFO_WRAP);
-    info("Application Overview").clear();
-    info("Application Metrics")
-        ._("Total Resource Preempted:",
-          appMerics.getResourcePreempted())
-        ._("Total Number of Non-AM Containers Preempted:",
-          String.valueOf(appMerics.getNumNonAMContainersPreempted()))
-        ._("Total Number of AM Containers Preempted:",
-          String.valueOf(appMerics.getNumAMContainersPreempted()))
-        ._("Resource Preempted from Current Attempt:",
-          attemptResourcePreempted)
-        ._("Number of Non-AM Containers Preempted from Current Attempt:",
-          attemptNumNonAMContainerPreempted)
-        ._("Aggregate Resource Allocation:",
-          String.format("%d MB-seconds, %d vcore-seconds", 
-              appMerics.getMemorySeconds(), appMerics.getVcoreSeconds()));
-    pdiv._();
-
-    Collection<RMAppAttempt> attempts = rmApp.getAppAttempts().values();
-    String amString =
-        attempts.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
-
-    DIV<Hamlet> div = html.
-        _(InfoBlock.class).
-        div(_INFO_WRAP);
-    // MRAppMasters Table
-    TABLE<DIV<Hamlet>> table = div.table("#app");
-    table.
-      tr().
-        th(amString).
-      _().
-      tr().
-        th(_TH, "Attempt Number").
-        th(_TH, "Start Time").
-        th(_TH, "Node").
-        th(_TH, "Logs").
-      _();
-
-    boolean odd = false;
-    for (RMAppAttempt attempt : attempts) {
-      AppAttemptInfo attemptInfo = new AppAttemptInfo(attempt, app.getUser());
-      table.tr((odd = !odd) ? _ODD : _EVEN).
-        td(String.valueOf(attemptInfo.getAttemptId())).
-        td(Times.format(attemptInfo.getStartTime())).
-        td().a(".nodelink", url("//",
-            attemptInfo.getNodeHttpAddress()),
-            attemptInfo.getNodeHttpAddress())._().
-        td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._().
-      _();
-    }
-
-    table._();
-    div._();
-
-    createContainerLocalityTable(html, attemptMetrics);
-    createResourceRequestsTable(html, app);
-  }
-
-  private void createContainerLocalityTable(Block html,
-      RMAppAttemptMetrics attemptMetrics) {
-    if (attemptMetrics == null) {
-      return;
-    }
-
-    DIV<Hamlet> div = html.div(_INFO_WRAP);
-    TABLE<DIV<Hamlet>> table =
-        div.h3(
-          "Total Allocated Containers: "
-              + attemptMetrics.getTotalAllocatedContainers()).h3("Each table cell"
-            + " represents the number of NodeLocal/RackLocal/OffSwitch containers"
-            + " satisfied by NodeLocal/RackLocal/OffSwitch resource requests.").table(
-          "#containerLocality");
-    table.
-      tr().
-        th(_TH, "").
-        th(_TH, "Node Local Request").
-        th(_TH, "Rack Local Request").
-        th(_TH, "Off Switch Request").
-      _();
-
-    String[] containersType =
-        { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
-            "Num Off Switch Containers (satisfied by)" };
-    boolean odd = false;
-    for (int i = 0; i < attemptMetrics.getLocalityStatistics().length; i++) {
-      table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
-        .td(String.valueOf(attemptMetrics.getLocalityStatistics()[i][0]))
-        .td(i == 0 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][1]))
-        .td(i <= 1 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][2]))._();
-    }
-    table._();
-    div._();
-  }
-
-  private void createResourceRequestsTable(Block html, AppInfo app) {
-    TBODY<TABLE<Hamlet>> tbody =
-        html.table("#ResourceRequests").thead().tr()
-          .th(".priority", "Priority")
-          .th(".resourceName", "Resource Name")
-          .th(".totalResource", "Capability")
-          .th(".numContainers", "Num Containers")
-          .th(".relaxLocality", "Relax Locality")
-          .th(".nodeLabelExpression", "Node Label Expression")._()._().tbody();
-
-    Resource totalResource = Resource.newInstance(0, 0);
-    if (app.getResourceRequests() != null) {
-      for (ResourceRequest request : app.getResourceRequests()) {
-        if (request.getNumContainers() == 0) {
-          continue;
-        }
-
-        tbody.tr()
-          .td(String.valueOf(request.getPriority()))
-          .td(request.getResourceName())
-          .td(String.valueOf(request.getCapability()))
-          .td(String.valueOf(request.getNumContainers()))
-          .td(String.valueOf(request.getRelaxLocality()))
-          .td(request.getNodeLabelExpression() == null ? "N/A" : request
-              .getNodeLabelExpression())._();
-        if (request.getResourceName().equals(ResourceRequest.ANY)) {
-          Resources.addTo(totalResource,
-            Resources.multiply(request.getCapability(),
-              request.getNumContainers()));
-        }
-      }
-    }
-    html.div().$class("totalResourceRequests")
-      .h3("Total Outstanding Resource Requests: " + totalResource)._();
-    tbody._()._();
-  }
-
-  private String clarifyAppState(YarnApplicationState state) {
-    String ret = state.toString();
-    switch (state) {
-    case NEW:
-      return ret + ": waiting for application to be initialized";
-    case NEW_SAVING:
-      return ret + ": waiting for application to be persisted in state-store.";
-    case SUBMITTED:
-      return ret + ": waiting for application to be accepted by scheduler.";
-    case ACCEPTED:
-      return ret + ": waiting for AM container to be allocated, launched and"
-          + " register with RM.";
-    case RUNNING:
-      return ret + ": AM has registered with RM and started running.";
-    default:
-      return ret;
-    }
-  }
-
-  private String clairfyAppFinalStatus(FinalApplicationStatus status) {
-    if (status == FinalApplicationStatus.UNDEFINED) {
-      return "Application has not completed yet.";
-    }
-    return status.toString();
-  }
-}
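
The kill button in the block removed above drives the ResourceManager REST API: it PUTs {"state":"KILLED"} to /ws/v1/cluster/apps/{appid}/state and then reloads the app page. Below is a minimal sketch of the same call made directly from Java; the RM address and application id are placeholders, and a secured cluster would additionally need SPNEGO or delegation-token handling, which this sketch omits.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class KillAppViaRestSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder RM web address and application id; substitute real values.
        String rmAddress = "http://rm-host:8088";
        String appId = "application_1410901177871_0001";

        URL url = new URL(rmAddress + "/ws/v1/cluster/apps/" + appId + "/state");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);

        // Same JSON body the web UI sends from its confirmAction() script.
        byte[] body = "{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8);
        try (OutputStream out = conn.getOutputStream()) {
          out.write(body);
        }
        // The state transition is asynchronous; the response code only tells
        // us whether the request was accepted.
        System.out.println("HTTP status: " + conn.getResponseCode());
      }
    }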

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bfd087/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
index 8993324..9f9b7c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
@@ -18,19 +18,38 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
 
+import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
 public class AppPage extends RmView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override 
+  protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
-    set(DATATABLES_ID, "ResourceRequests");
+    String appId = $(YarnWebParams.APPLICATION_ID);
+    set(
+      TITLE,
+      appId.isEmpty() ? "Bad request: missing application ID" : join(
+        "Application ", $(YarnWebParams.APPLICATION_ID)));
+
+    set(DATATABLES_ID, "attempts ResourceRequests");
+    set(initID(DATATABLES, "attempts"), WebPageUtils.attemptsTableInit());
+    setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
+
     setTableStyles(html, "ResourceRequests");
+
+    set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.RM_WEB_UI);
   }
 
-  @Override protected Class<? extends SubView> content() {
+  @Override 
+  protected Class<? extends SubView> content() {
     return AppBlock.class;
   }
 }


[29/49] hadoop git commit: HADOOP-11668. hadoop-daemons.sh bw compat broke with --slaves change (Vinayakumar B via aw)

Posted by zj...@apache.org.
HADOOP-11668. hadoop-daemons.sh bw compat broke with --slaves change (Vinayakumar B via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77110498
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77110498
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77110498

Branch: refs/heads/YARN-2928
Commit: 7711049837d69d0eeabad27f2e30fab606a4adc2
Parents: 47f7f18
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon Mar 9 22:31:50 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Mon Mar 9 22:31:50 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt       |  3 +++
 .../hadoop-common/src/main/bin/hadoop-daemons.sh      | 13 +++++++++----
 .../hadoop-common/src/main/bin/hadoop-functions.sh    | 14 +++++++++++++-
 hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh   | 13 +++++++++----
 4 files changed, 34 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f831d1a..7d0cbee 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -423,6 +423,9 @@ Trunk (Unreleased)
     HADOOP-11653. shellprofiles should require .sh extension
     (Brahma Reddy Battula via aw)
 
+    HADOOP-11668. hadoop-daemons.sh bw compat broke with --slaves change
+    (Vinayakumar B via aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
index 9e4e6b0..2619ab7 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
@@ -65,8 +65,13 @@ hadoop_error "WARNING: Attempting to execute replacement \"hdfs --slaves --daemo
 # we're going to turn this into
 #  hdfs --slaves --daemon (start|stop) (rest of options)
 #
-argv=(${HADOOP_USER_PARAMS[@]/start})
-argv=(${argv[@]/stop})
-argv=(${argv[@]/status})
+for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ ))
+do
+  if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] ||
+     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] ||
+     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then
+    unset HADOOP_USER_PARAMS[$i]
+  fi
+done
 
-${hdfsscript} --slaves --daemon "${daemonmode}" "${argv[@]}"
+${hdfsscript} --slaves --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 9488e3c..8129c5c 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -415,7 +415,19 @@ function hadoop_common_slave_mode_execute
 
   # if --slaves is still on the command line, remove it
   # to prevent loops
-  argv=(${argv[@]/--slaves})
+  # Also remove --hostnames and --hosts along with arg values
+  local argsSize=${#argv[@]};
+  for (( i = 0; i < $argsSize; i++ ))
+  do
+    if [[ "${argv[$i]}" =~ ^--slaves$ ]]; then
+      unset argv[$i]
+    elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] ||
+      [[ "${argv[$i]}" =~ ^--hosts$ ]]; then
+      unset argv[$i];
+      let i++;
+      unset argv[$i];
+    fi
+  done
   hadoop_connect_to_hosts -- "${argv[@]}"
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77110498/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
index c6963d9..75fb1f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
@@ -55,9 +55,14 @@ hadoop_error "WARNING: Attempting to execute replacement \"yarn --slaves --daemo
 # we're going to turn this into
 #  yarn --slaves --daemon (start|stop) (rest of options)
 #
-argv=(${HADOOP_USER_PARAMS[@]/start})
-argv=(${argv[@]/stop})
-argv=(${argv[@]/status})
+for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ ))
+do
+  if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] ||
+     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] ||
+     [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then
+    unset HADOOP_USER_PARAMS[$i]
+  fi
+done
 
-${yarnscript} --slaves --daemon "${daemonmode}" "${argv[@]}"
+${yarnscript} --slaves --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}"
 


[22/49] hadoop git commit: HADOOP-11692. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.

Posted by zj...@apache.org.
HADOOP-11692. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de1101cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de1101cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de1101cb

Branch: refs/heads/YARN-2928
Commit: de1101cb5be2d8efd0ef4945f64ccfe7cbd01049
Parents: 42e3a80
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Mon Mar 9 11:55:32 2015 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Mon Mar 9 11:55:32 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt             | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Server.java         | 9 +++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de1101cb/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 37604c4..d5a8463 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1070,6 +1070,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
     (Duo Zhang via wheat9)
 
+    HADOOP-11692. Improve authentication failure WARN message to avoid user
+    confusion. (Yongjun Zhang)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de1101cb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 893e0eb..d2d61b3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,10 +1324,15 @@ public abstract class Server {
           saslResponse = processSaslMessage(saslMessage);
         } catch (IOException e) {
           rpcMetrics.incrAuthenticationFailures();
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(StringUtils.stringifyException(e));
+          }
           // attempting user could be null
+          IOException tce = (IOException) getTrueCause(e);
           AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-              + attemptingUser + " (" + e.getLocalizedMessage() + ")");
-          throw (IOException) getTrueCause(e);
+              + attemptingUser + " (" + e.getLocalizedMessage()
+              + ") with true cause: (" + tce.getLocalizedMessage() + ")");
+          throw tce;
         }
         
         if (saslServer != null && saslServer.isComplete()) {
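
In Server.java, getTrueCause (not shown in this hunk) unwraps the SASL exception to its underlying cause, and the audit WARN line now reports both the wrapper's message and that root cause. Purely as an illustration of the idea, walking a cause chain and formatting the two messages looks roughly like the sketch below; the class and method names are made up for the example and are not part of Server.java.

    public class RootCauseSketch {
      // Roughly what the improved audit line conveys: the wrapper's message
      // plus the message of the underlying ("true") cause.
      static String describe(Throwable t) {
        Throwable root = t;
        while (root.getCause() != null && root.getCause() != root) {
          root = root.getCause();
        }
        return t.getLocalizedMessage() + " with true cause: ("
            + root.getLocalizedMessage() + ")";
      }
    }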


[48/49] hadoop git commit: HADOOP-9477. Add posixGroups support for LDAP groups mapping service. (Dapeng Sun via Yongjun Zhang)

Posted by zj...@apache.org.
HADOOP-9477. Add posixGroups support for LDAP groups mapping service. (Dapeng Sun via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82128774
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82128774
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82128774

Branch: refs/heads/YARN-2928
Commit: 82128774156c30a535b62d764bb6cf9c8d2f3a3b
Parents: 863079b
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Thu Mar 12 14:42:34 2015 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Thu Mar 12 14:52:36 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop/security/LdapGroupsMapping.java      |  54 ++++++++--
 .../hadoop/security/TestLdapGroupsMapping.java  |  42 +-------
 .../security/TestLdapGroupsMappingBase.java     |  77 ++++++++++++++
 .../TestLdapGroupsMappingWithPosixGroup.java    | 103 +++++++++++++++++++
 5 files changed, 229 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82128774/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 40b5cd0..6970bad 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -37,6 +37,9 @@ Trunk (Unreleased)
 
     HADOOP-11565. Add --slaves shell option (aw)
 
+    HADOOP-9477. Add posixGroups support for LDAP groups mapping service.
+    (Dapeng Sun via Yongjun Zhang)
+
   IMPROVEMENTS
 
     HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82128774/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index d463ac7..df91b70 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -150,6 +150,14 @@ public class LdapGroupsMapping
   public static final String GROUP_NAME_ATTR_DEFAULT = "cn";
 
   /*
+   * Posix attributes
+   */
+  public static final String POSIX_UIDNUMBER = "uidNumber";
+  public static final String POSIX_GIDNUMBER = "gidNumber";
+  public static final String POSIX_GROUP = "posixGroup";
+  public static final String POSIX_ACCOUNT = "posixAccount";
+
+  /*
    * LDAP {@link SearchControls} attribute to set the time limit
    * for an invoked directory search. Prevents infinite wait cases.
    */
@@ -178,6 +186,7 @@ public class LdapGroupsMapping
   private String userSearchFilter;
   private String groupMemberAttr;
   private String groupNameAttr;
+  private boolean isPosix;
 
   public static int RECONNECT_RETRY_COUNT = 3;
   
@@ -242,15 +251,40 @@ public class LdapGroupsMapping
       SearchResult result = results.nextElement();
       String userDn = result.getNameInNamespace();
 
-      NamingEnumeration<SearchResult> groupResults =
-          ctx.search(baseDN,
-              "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
-              new Object[]{userDn},
-              SEARCH_CONTROLS);
-      while (groupResults.hasMoreElements()) {
-        SearchResult groupResult = groupResults.nextElement();
-        Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
-        groups.add(groupName.get().toString());
+      NamingEnumeration<SearchResult> groupResults = null;
+
+      if (isPosix) {
+        String gidNumber = null;
+        String uidNumber = null;
+        Attribute gidAttribute = result.getAttributes().get(POSIX_GIDNUMBER);
+        Attribute uidAttribute = result.getAttributes().get(POSIX_UIDNUMBER);
+        if (gidAttribute != null) {
+          gidNumber = gidAttribute.get().toString();
+        }
+        if (uidAttribute != null) {
+          uidNumber = uidAttribute.get().toString();
+        }
+        if (uidNumber != null && gidNumber != null) {
+          groupResults =
+              ctx.search(baseDN,
+                  "(&"+ groupSearchFilter + "(|(" + POSIX_GIDNUMBER + "={0})" +
+                      "(" + groupMemberAttr + "={1})))",
+                  new Object[] { gidNumber, uidNumber },
+                  SEARCH_CONTROLS);
+        }
+      } else {
+        groupResults =
+            ctx.search(baseDN,
+                "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
+                new Object[]{userDn},
+                SEARCH_CONTROLS);
+      }
+      if (groupResults != null) {
+        while (groupResults.hasMoreElements()) {
+          SearchResult groupResult = groupResults.nextElement();
+          Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
+          groups.add(groupName.get().toString());
+        }
       }
     }
 
@@ -334,6 +368,8 @@ public class LdapGroupsMapping
         conf.get(GROUP_SEARCH_FILTER_KEY, GROUP_SEARCH_FILTER_DEFAULT);
     userSearchFilter =
         conf.get(USER_SEARCH_FILTER_KEY, USER_SEARCH_FILTER_DEFAULT);
+    isPosix = groupSearchFilter.contains(POSIX_GROUP) && userSearchFilter
+        .contains(POSIX_ACCOUNT);
     groupMemberAttr =
         conf.get(GROUP_MEMBERSHIP_ATTR_KEY, GROUP_MEMBERSHIP_ATTR_DEFAULT);
     groupNameAttr =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82128774/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
index a021a8a..17a14d1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
@@ -29,13 +29,7 @@ import java.util.Arrays;
 import java.util.List;
 
 import javax.naming.CommunicationException;
-import javax.naming.NamingEnumeration;
 import javax.naming.NamingException;
-import javax.naming.directory.Attribute;
-import javax.naming.directory.Attributes;
-import javax.naming.directory.BasicAttribute;
-import javax.naming.directory.BasicAttributes;
-import javax.naming.directory.DirContext;
 import javax.naming.directory.SearchControls;
 import javax.naming.directory.SearchResult;
 
@@ -49,46 +43,12 @@ import org.junit.Before;
 import org.junit.Test;
 
 @SuppressWarnings("unchecked")
-public class TestLdapGroupsMapping {
-  private DirContext mockContext;
-  
-  private LdapGroupsMapping mappingSpy = spy(new LdapGroupsMapping());
-  private NamingEnumeration mockUserNamingEnum = mock(NamingEnumeration.class);
-  private NamingEnumeration mockGroupNamingEnum = mock(NamingEnumeration.class);
-  private String[] testGroups = new String[] {"group1", "group2"};
-  
+public class TestLdapGroupsMapping extends TestLdapGroupsMappingBase {
   @Before
   public void setupMocks() throws NamingException {
-    mockContext = mock(DirContext.class);
-    doReturn(mockContext).when(mappingSpy).getDirContext();
-            
     SearchResult mockUserResult = mock(SearchResult.class);
-    // We only ever call hasMoreElements once for the user NamingEnum, so 
-    // we can just have one return value
-    when(mockUserNamingEnum.hasMoreElements()).thenReturn(true);
     when(mockUserNamingEnum.nextElement()).thenReturn(mockUserResult);
     when(mockUserResult.getNameInNamespace()).thenReturn("CN=some_user,DC=test,DC=com");
-    
-    SearchResult mockGroupResult = mock(SearchResult.class);
-    // We're going to have to define the loop here. We want two iterations,
-    // to get both the groups
-    when(mockGroupNamingEnum.hasMoreElements()).thenReturn(true, true, false);
-    when(mockGroupNamingEnum.nextElement()).thenReturn(mockGroupResult);
-    
-    // Define the attribute for the name of the first group
-    Attribute group1Attr = new BasicAttribute("cn");
-    group1Attr.add(testGroups[0]);
-    Attributes group1Attrs = new BasicAttributes();
-    group1Attrs.put(group1Attr);
-    
-    // Define the attribute for the name of the second group
-    Attribute group2Attr = new BasicAttribute("cn");
-    group2Attr.add(testGroups[1]);
-    Attributes group2Attrs = new BasicAttributes();
-    group2Attrs.put(group2Attr);
-    
-    // This search result gets reused, so return group1, then group2
-    when(mockGroupResult.getAttributes()).thenReturn(group1Attrs, group2Attrs);
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82128774/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingBase.java
new file mode 100644
index 0000000..c54ac4c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingBase.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import javax.naming.NamingEnumeration;
+import javax.naming.NamingException;
+import javax.naming.directory.Attribute;
+import javax.naming.directory.Attributes;
+import javax.naming.directory.BasicAttribute;
+import javax.naming.directory.BasicAttributes;
+import javax.naming.directory.DirContext;
+import javax.naming.directory.SearchResult;
+
+import org.junit.Before;
+
+public class TestLdapGroupsMappingBase {
+  protected DirContext mockContext;
+
+  protected LdapGroupsMapping mappingSpy = spy(new LdapGroupsMapping());
+  protected NamingEnumeration mockUserNamingEnum =
+      mock(NamingEnumeration.class);
+  protected NamingEnumeration mockGroupNamingEnum =
+      mock(NamingEnumeration.class);
+  protected String[] testGroups = new String[] {"group1", "group2"};
+
+  @Before
+  public void setupMocksBase() throws NamingException {
+    mockContext = mock(DirContext.class);
+    doReturn(mockContext).when(mappingSpy).getDirContext();
+
+    // We only ever call hasMoreElements once for the user NamingEnum, so
+    // we can just have one return value
+    when(mockUserNamingEnum.hasMoreElements()).thenReturn(true);
+
+    SearchResult mockGroupResult = mock(SearchResult.class);
+    // We're going to have to define the loop here. We want two iterations,
+    // to get both the groups
+    when(mockGroupNamingEnum.hasMoreElements()).thenReturn(true, true, false);
+    when(mockGroupNamingEnum.nextElement()).thenReturn(mockGroupResult);
+
+    // Define the attribute for the name of the first group
+    Attribute group1Attr = new BasicAttribute("cn");
+    group1Attr.add(testGroups[0]);
+    Attributes group1Attrs = new BasicAttributes();
+    group1Attrs.put(group1Attr);
+
+    // Define the attribute for the name of the second group
+    Attribute group2Attr = new BasicAttribute("cn");
+    group2Attr.add(testGroups[1]);
+    Attributes group2Attrs = new BasicAttributes();
+    group2Attrs.put(group2Attr);
+
+    // This search result gets reused, so return group1, then group2
+    when(mockGroupResult.getAttributes()).thenReturn(group1Attrs, group2Attrs);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82128774/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithPosixGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithPosixGroup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithPosixGroup.java
new file mode 100644
index 0000000..1d1a354
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithPosixGroup.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import static org.mockito.Matchers.anyString;
+
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.contains;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.naming.NamingException;
+import javax.naming.directory.Attribute;
+import javax.naming.directory.Attributes;
+import javax.naming.directory.SearchControls;
+import javax.naming.directory.SearchResult;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+@SuppressWarnings("unchecked")
+public class TestLdapGroupsMappingWithPosixGroup
+  extends TestLdapGroupsMappingBase {
+
+  @Before
+  public void setupMocks() throws NamingException {
+    SearchResult mockUserResult = mock(SearchResult.class);
+    when(mockUserNamingEnum.nextElement()).thenReturn(mockUserResult);
+
+    Attribute mockUidAttr = mock(Attribute.class);
+    Attribute mockGidAttr = mock(Attribute.class);
+    Attributes mockAttrs = mock(Attributes.class);
+
+    when(mockUidAttr.get()).thenReturn("700");
+    when(mockGidAttr.get()).thenReturn("600");
+    when(mockAttrs.get(eq("uidNumber"))).thenReturn(mockUidAttr);
+    when(mockAttrs.get(eq("gidNumber"))).thenReturn(mockGidAttr);
+
+    when(mockUserResult.getAttributes()).thenReturn(mockAttrs);
+  }
+
+  @Test
+  public void testGetGroups() throws IOException, NamingException {
+    // The search functionality of the mock context is reused, so we will
+    // return the user NamingEnumeration first, and then the group
+    when(mockContext.search(anyString(), contains("posix"),
+        any(Object[].class), any(SearchControls.class)))
+        .thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
+
+    doTestGetGroups(Arrays.asList(testGroups), 2);
+  }
+
+  private void doTestGetGroups(List<String> expectedGroups, int searchTimes)
+      throws IOException, NamingException {
+    Configuration conf = new Configuration();
+    // Set this, so we don't throw an exception
+    conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test");
+    conf.set(LdapGroupsMapping.GROUP_SEARCH_FILTER_KEY,
+        "(objectClass=posixGroup)(cn={0})");
+    conf.set(LdapGroupsMapping.USER_SEARCH_FILTER_KEY,
+        "(objectClass=posixAccount)");
+    conf.set(LdapGroupsMapping.GROUP_MEMBERSHIP_ATTR_KEY, "memberUid");
+    conf.set(LdapGroupsMapping.GROUP_NAME_ATTR_KEY, "cn");
+
+    mappingSpy.setConf(conf);
+    // Username is arbitrary, since the spy is mocked to respond the same,
+    // regardless of input
+    List<String> groups = mappingSpy.getGroups("some_user");
+
+    Assert.assertEquals(expectedGroups, groups);
+
+    // We should have searched for a user, and then two groups
+    verify(mockContext, times(searchTimes)).search(anyString(),
+                                         anyString(),
+                                         any(Object[].class),
+                                         any(SearchControls.class));
+  }
+}
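
As the isPosix assignment above shows, LdapGroupsMapping switches into posix mode when the group search filter references posixGroup and the user search filter references posixAccount; groups are then resolved through the user's uidNumber/gidNumber attributes rather than only the membership attribute. A minimal usage sketch mirroring the configuration used in the test above might look like this; the LDAP URL and user name are placeholders for a real directory server.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.LdapGroupsMapping;

    public class PosixLdapGroupsSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder LDAP URL; point this at a real directory server.
        conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://ldap.example.com");
        // These two filters are what flip LdapGroupsMapping into posix mode.
        conf.set(LdapGroupsMapping.GROUP_SEARCH_FILTER_KEY,
            "(objectClass=posixGroup)(cn={0})");
        conf.set(LdapGroupsMapping.USER_SEARCH_FILTER_KEY,
            "(objectClass=posixAccount)");
        conf.set(LdapGroupsMapping.GROUP_MEMBERSHIP_ATTR_KEY, "memberUid");
        conf.set(LdapGroupsMapping.GROUP_NAME_ATTR_KEY, "cn");

        LdapGroupsMapping mapping = new LdapGroupsMapping();
        mapping.setConf(conf);

        // Groups are looked up via the user's uidNumber/gidNumber attributes.
        List<String> groups = mapping.getGroups("some_user");
        System.out.println(groups);
      }
    }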


[05/49] hadoop git commit: YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired. Contributed by Zhijie Shen

Posted by zj...@apache.org.
YARN-3227. Timeline renew delegation token fails when RM user's TGT is
expired. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1abc5d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1abc5d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1abc5d4

Branch: refs/heads/YARN-2928
Commit: d1abc5d4fc00bb1b226066684556ba16ace71744
Parents: 24db081
Author: Xuan <xg...@apache.org>
Authored: Fri Mar 6 13:32:05 2015 -0800
Committer: Xuan <xg...@apache.org>
Committed: Fri Mar 6 13:32:05 2015 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java    | 2 ++
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1abc5d4/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index accde78..d073169 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -713,6 +713,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
     events for old client. (Zhihai Xu via kasha)
 
+    YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired
+    (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1abc5d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index af68492..c05d65b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -439,6 +439,7 @@ public class TimelineClientImpl extends TimelineClient {
         UserGroupInformation callerUGI = isProxyAccess ?
             UserGroupInformation.getCurrentUser().getRealUser()
             : UserGroupInformation.getCurrentUser();
+        callerUGI.checkTGTAndReloginFromKeytab();
         try {
           return callerUGI.doAs(action);
         } catch (UndeclaredThrowableException e) {
@@ -488,6 +489,7 @@ public class TimelineClientImpl extends TimelineClient {
           : UserGroupInformation.getCurrentUser();
       final String doAsUser = isProxyAccess ?
           UserGroupInformation.getCurrentUser().getShortUserName() : null;
+      callerUGI.checkTGTAndReloginFromKeytab();
       try {
         return callerUGI.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
           @Override
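
The fix is to refresh the login before each privileged call, so an expired TGT is renewed from the keytab instead of failing the delegation-token operation. A hedged sketch of that general pattern (not the exact TimelineClientImpl code) is below; it assumes the process has already logged in, and the relogin call is effectively a no-op for non-keytab logins.

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class TgtReloginSketch {
      // Refresh the TGT from the keytab if needed, then run privileged work
      // as the current user.
      static <T> T runAs(final PrivilegedExceptionAction<T> action)
          throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        ugi.checkTGTAndReloginFromKeytab();
        return ugi.doAs(action);
      }
    }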


[38/49] hadoop git commit: YARN-3295. Fix documentation nits found in markdown conversion. Contributed by Masatake Iwasaki.

Posted by zj...@apache.org.
YARN-3295. Fix documentation nits found in markdown conversion. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30c428a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30c428a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30c428a8

Branch: refs/heads/YARN-2928
Commit: 30c428a858c179645d6dc82b7027f6b7e871b439
Parents: 5c1036d
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Mar 11 15:02:24 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed Mar 11 15:02:24 2015 +0900

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../src/site/markdown/ResourceManagerRestart.md |  6 ++---
 .../src/site/markdown/YarnCommands.md           | 28 ++++++++++----------
 3 files changed, 19 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30c428a8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d882f3e..8930045 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -749,6 +749,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3300. Outstanding_resource_requests table should not be shown in AHS.
     (Xuan Gong via jianhe)
 
+    YARN-3295. Fix documentation nits found in markdown conversion.
+    (Masatake Iwasaki via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30c428a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
index e516afb..d23505d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
@@ -141,12 +141,10 @@ Notes
 -----
 
 ContainerId string format is changed if RM restarts with work-preserving recovery enabled. It used to be such format:
-
-    Container_{clusterTimestamp}_{appId}_{attemptId}_{containerId}, e.g. Container_1410901177871_0001_01_000005.
+`Container_{clusterTimestamp}_{appId}_{attemptId}_{containerId}`, e.g. `Container_1410901177871_0001_01_000005`.
 
 It is now changed to:
-
-    Container_e{epoch}_{clusterTimestamp}_{appId}_{attemptId}_{containerId}, e.g. Container_e17_1410901177871_0001_01_000005.
+`Container_`**e{epoch}**`_{clusterTimestamp}_{appId}_{attemptId}_{containerId}`, e.g. `Container_`**e17**`_1410901177871_0001_01_000005`.
  
 Here, the additional epoch number is a monotonically increasing integer which starts from 0 and is increased by 1 each time RM restarts. If epoch number is 0, it is omitted and the containerId string format stays the same as before.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30c428a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index 28bb678..b4bed64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -67,11 +67,11 @@ Usage: `yarn application [options] `
 
 | COMMAND\_OPTIONS | Description |
 |:---- |:---- |
-| -appStates States | Works with -list to filter applications based on input comma-separated list of application states. The valid application state can be one of the following:  ALL, NEW, NEW\_SAVING, SUBMITTED, ACCEPTED, RUNNING, FINISHED, FAILED, KILLED |
-| -appTypes Types | Works with -list to filter applications based on input comma-separated list of application types. |
+| -appStates \<States\> | Works with -list to filter applications based on input comma-separated list of application states. The valid application state can be one of the following:  ALL, NEW, NEW\_SAVING, SUBMITTED, ACCEPTED, RUNNING, FINISHED, FAILED, KILLED |
+| -appTypes \<Types\> | Works with -list to filter applications based on input comma-separated list of application types. |
 | -list | Lists applications from the RM. Supports optional use of -appTypes to filter applications based on application type, and -appStates to filter applications based on application state. |
-| -kill ApplicationId | Kills the application. |
-| -status ApplicationId | Prints the status of the application. |
+| -kill \<ApplicationId\> | Kills the application. |
+| -status \<ApplicationId\> | Prints the status of the application. |
 
 Prints application(s) report/kill application
 
@@ -82,8 +82,8 @@ Usage: `yarn applicationattempt [options] `
 | COMMAND\_OPTIONS | Description |
 |:---- |:---- |
 | -help | Help |
-| -list ApplicationId | Lists applications attempts from the RM |
-| -status Application Attempt Id | Prints the status of the application attempt. |
+| -list \<ApplicationId\> | Lists applications attempts for the given application. |
+| -status \<Application Attempt Id\> | Prints the status of the application attempt. |
 
 prints applicationattempt(s) report
 
@@ -100,8 +100,8 @@ Usage: `yarn container [options] `
 | COMMAND\_OPTIONS | Description |
 |:---- |:---- |
 | -help | Help |
-| -list ApplicationId | Lists containers for the application attempt. |
-| -status ContainerId | Prints the status of the container. |
+| -list \<Application Attempt Id\> | Lists containers for the application attempt. |
+| -status \<ContainerId\> | Prints the status of the container. |
 
 prints container(s) report
 
@@ -118,10 +118,10 @@ Usage: `yarn logs -applicationId <application ID> [options] `
 | COMMAND\_OPTIONS | Description |
 |:---- |:---- |
 | -applicationId \<application ID\> | Specifies an application id |
-| -appOwner AppOwner | AppOwner (assumed to be current user if not specified) |
-| -containerId ContainerId | ContainerId (must be specified if node address is specified) |
+| -appOwner \<AppOwner\> | AppOwner (assumed to be current user if not specified) |
+| -containerId \<ContainerId\> | ContainerId (must be specified if node address is specified) |
 | -help | Help |
-| -nodeAddress NodeAddress | NodeAddress in the format nodename:port (must be specified if container id is specified) |
+| -nodeAddress \<NodeAddress\> | NodeAddress in the format nodename:port (must be specified if container id is specified) |
 
 Dump the container logs
 
@@ -133,8 +133,8 @@ Usage: `yarn node [options] `
 |:---- |:---- |
 | -all | Works with -list to list all nodes. |
 | -list | Lists all running nodes. Supports optional use of -states to filter nodes based on node state, and -all to list all nodes. |
-| -states States | Works with -list to filter nodes based on input comma-separated list of node states. |
-| -status NodeId | Prints the status report of the node. |
+| -states \<States\> | Works with -list to filter nodes based on input comma-separated list of node states. |
+| -status \<NodeId\> | Prints the status report of the node. |
 
 Prints node report(s)
 
@@ -145,7 +145,7 @@ Usage: `yarn queue [options] `
 | COMMAND\_OPTIONS | Description |
 |:---- |:---- |
 | -help | Help |
-| -status QueueName | Prints the status of the queue. |
+| -status \<QueueName\> | Prints the status of the queue. |
 
 Prints queue information
 


[18/49] hadoop git commit: HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.

Posted by zj...@apache.org.
HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d799fbe1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d799fbe1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d799fbe1

Branch: refs/heads/YARN-2928
Commit: d799fbe1ccf8752c44f087e34b5f400591d3b5bd
Parents: b3e6992
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Sun Mar 8 20:39:46 2015 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Sun Mar 8 20:39:46 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ipc/Server.java         | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d799fbe1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 893e0eb..d2d61b3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,10 +1324,15 @@ public abstract class Server {
           saslResponse = processSaslMessage(saslMessage);
         } catch (IOException e) {
           rpcMetrics.incrAuthenticationFailures();
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(StringUtils.stringifyException(e));
+          }
           // attempting user could be null
+          IOException tce = (IOException) getTrueCause(e);
           AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-              + attemptingUser + " (" + e.getLocalizedMessage() + ")");
-          throw (IOException) getTrueCause(e);
+              + attemptingUser + " (" + e.getLocalizedMessage()
+              + ") with true cause: (" + tce.getLocalizedMessage() + ")");
+          throw tce;
         }
         
         if (saslServer != null && saslServer.isComplete()) {


[30/49] hadoop git commit: Configurable timeout between YARNRunner terminating the application and forcefully killing it. Contributed by Eric Payne.

Posted by zj...@apache.org.
Configurable timeout between YARNRunner terminating the application and forcefully killing it. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d39bc903
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d39bc903
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d39bc903

Branch: refs/heads/YARN-2928
Commit: d39bc903a0069a740744bafe10e506e452ed7018
Parents: 7711049
Author: Junping Du <ju...@apache.org>
Authored: Tue Mar 10 06:21:59 2015 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Tue Mar 10 06:21:59 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java    |  5 ++++
 .../src/main/resources/mapred-default.xml       |  8 ++++++
 .../org/apache/hadoop/mapred/YARNRunner.java    |  5 +++-
 .../apache/hadoop/mapred/TestYARNRunner.java    | 26 ++++++++++++++++++++
 5 files changed, 46 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d39bc903/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 8f06ac8..5f33041 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -335,6 +335,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
     class. (Chris Trezzo via kasha)
 
+    MAPREDUCE-6263. Configurable timeout between YARNRunner terminate the 
+    application and forcefully kill. (Eric Payne via junping_du)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d39bc903/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 5527103..9f671cd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -644,6 +644,11 @@ public interface MRJobConfig {
   public static final int DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD =
       50;
 
+  public static final String MR_AM_HARD_KILL_TIMEOUT_MS =
+      MR_AM_PREFIX + "hard-kill-timeout-ms";
+  public static final long DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS =
+      10 * 1000l;
+
   /**
    * The threshold in terms of seconds after which an unsatisfied mapper request
    * triggers reducer preemption to free space. Default 0 implies that the reduces

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d39bc903/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index d864756..1162183 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1360,6 +1360,14 @@
 </property>
 
 <property>
+  <name>yarn.app.mapreduce.am.hard-kill-timeout-ms</name>
+  <value>10000</value>
+  <description>
+     Number of milliseconds to wait before the job client kills the application.
+  </description>
+</property>
+
+<property>
   <description>CLASSPATH for MR applications. A comma-separated list
   of CLASSPATH entries. If mapreduce.application.framework is set then this
   must specify the appropriate classpath for that archive, and the name of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d39bc903/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 41dc72f..8e57607 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -640,7 +640,10 @@ public class YARNRunner implements ClientProtocol {
       clientCache.getClient(arg0).killJob(arg0);
       long currentTimeMillis = System.currentTimeMillis();
       long timeKillIssued = currentTimeMillis;
-      while ((currentTimeMillis < timeKillIssued + 10000L)
+      long killTimeOut =
+          conf.getLong(MRJobConfig.MR_AM_HARD_KILL_TIMEOUT_MS,
+                       MRJobConfig.DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS);
+      while ((currentTimeMillis < timeKillIssued + killTimeOut)
           && !isJobInTerminalState(status)) {
         try {
           Thread.sleep(1000L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d39bc903/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
index 420a95f..c427975 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
@@ -201,6 +201,32 @@ public class TestYARNRunner extends TestCase {
     verify(clientDelegate).killJob(jobId);
   }
 
+  @Test(timeout=60000)
+  public void testJobKillTimeout() throws Exception {
+    long timeToWaitBeforeHardKill =
+        10000 + MRJobConfig.DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS;
+    conf.setLong(MRJobConfig.MR_AM_HARD_KILL_TIMEOUT_MS,
+        timeToWaitBeforeHardKill);
+    clientDelegate = mock(ClientServiceDelegate.class);
+    doAnswer(
+        new Answer<ClientServiceDelegate>() {
+          @Override
+          public ClientServiceDelegate answer(InvocationOnMock invocation)
+              throws Throwable {
+            return clientDelegate;
+          }
+        }
+      ).when(clientCache).getClient(any(JobID.class));
+    when(clientDelegate.getJobStatus(any(JobID.class))).thenReturn(new
+        org.apache.hadoop.mapreduce.JobStatus(jobId, 0f, 0f, 0f, 0f,
+            State.RUNNING, JobPriority.HIGH, "tmp", "tmp", "tmp", "tmp"));
+    long startTimeMillis = System.currentTimeMillis();
+    yarnRunner.killJob(jobId);
+    assertTrue("killJob should have waited at least " + timeToWaitBeforeHardKill
+        + " ms.", System.currentTimeMillis() - startTimeMillis
+                  >= timeToWaitBeforeHardKill);
+  }
+
   @Test(timeout=20000)
   public void testJobSubmissionFailure() throws Exception {
     when(resourceMgrDelegate.submitApplication(any(ApplicationSubmissionContext.class))).


[42/49] hadoop git commit: YARN-1884. Added nodeHttpAddress into ContainerReport and fixed the link to NM web page. Contributed by Xuan Gong.

Posted by zj...@apache.org.
YARN-1884. Added nodeHttpAddress into ContainerReport and fixed the link to NM web page. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85f6d67f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85f6d67f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85f6d67f

Branch: refs/heads/YARN-2928
Commit: 85f6d67fa78511f255fcfa810afc9a156a7b483b
Parents: 7a346bc
Author: Zhijie Shen <zj...@apache.org>
Authored: Wed Mar 11 19:35:19 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Wed Mar 11 19:35:19 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../yarn/api/records/ContainerReport.java       | 17 ++++++++++++++-
 .../src/main/proto/yarn_protos.proto            |  1 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  | 11 +++++++---
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  4 ++--
 .../yarn/client/api/impl/TestAHSClient.java     |  6 ++++--
 .../yarn/client/api/impl/TestYarnClient.java    | 16 ++++++++------
 .../hadoop/yarn/client/cli/TestYarnCLI.java     | 18 ++++++++++++----
 .../records/impl/pb/ContainerReportPBImpl.java  | 19 +++++++++++++++++
 .../ApplicationHistoryManagerImpl.java          |  2 +-
 ...pplicationHistoryManagerOnTimelineStore.java | 10 ++++++++-
 .../metrics/ContainerMetricsConstants.java      |  2 ++
 .../yarn/server/webapp/AppAttemptBlock.java     | 12 ++++++-----
 .../hadoop/yarn/server/webapp/AppBlock.java     | 11 ++++------
 .../yarn/server/webapp/ContainerBlock.java      |  7 ++++++-
 .../yarn/server/webapp/dao/ContainerInfo.java   |  5 +++++
 .../metrics/ContainerCreatedEvent.java          |  8 ++++++-
 .../metrics/SystemMetricsPublisher.java         |  5 ++++-
 .../rmcontainer/RMContainer.java                |  1 +
 .../rmcontainer/RMContainerImpl.java            | 22 ++++++++++++++++++--
 .../metrics/TestSystemMetricsPublisher.java     |  5 ++++-
 21 files changed, 147 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
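
For reference, a minimal sketch of the extended ContainerReport API after this patch; the IDs, resource, and addresses below are illustrative values only:

  import org.apache.hadoop.yarn.api.records.*;

  public class ContainerReportExample {
    public static void main(String[] args) {
      ApplicationId appId = ApplicationId.newInstance(1234L, 5);
      ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
      ContainerId containerId = ContainerId.newContainerId(attemptId, 1);

      // newInstance now takes the node HTTP address as the final argument.
      ContainerReport report = ContainerReport.newInstance(
          containerId, Resource.newInstance(1024, 1),
          NodeId.newInstance("host", 1234), Priority.UNDEFINED,
          1234L, 5678L, "diagnosticInfo", "logURL", 0,
          ContainerState.COMPLETE, "http://host:2345");

      // Web UIs and the CLI link to the NM web page, falling back to "N/A"
      // when the address is absent.
      String nodeHttp = report.getNodeHttpAddress();
      System.out.println(nodeHttp == null ? "N/A" : nodeHttp);
    }
  }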


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8930045..969c6a1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -752,6 +752,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3295. Fix documentation nits found in markdown conversion.
     (Masatake Iwasaki via ozawa)
 
+    YARN-1884. Added nodeHttpAddress into ContainerReport and fixed the link to NM
+    web page. (Xuan Gong via zjshen)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
index 4cce77f..72b8edf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.util.Records;
  * <li>{@link ContainerState} of the container.</li>
  * <li>Diagnostic information in case of errors.</li>
  * <li>Log URL.</li>
+ * <li>nodeHttpAddress</li>
  * </ul>
  * </p>
  * 
@@ -54,7 +55,8 @@ public abstract class ContainerReport {
   public static ContainerReport newInstance(ContainerId containerId,
       Resource allocatedResource, NodeId assignedNode, Priority priority,
       long creationTime, long finishTime, String diagnosticInfo, String logUrl,
-      int containerExitStatus, ContainerState containerState) {
+      int containerExitStatus, ContainerState containerState,
+      String nodeHttpAddress) {
     ContainerReport report = Records.newRecord(ContainerReport.class);
     report.setContainerId(containerId);
     report.setAllocatedResource(allocatedResource);
@@ -66,6 +68,7 @@ public abstract class ContainerReport {
     report.setLogUrl(logUrl);
     report.setContainerExitStatus(containerExitStatus);
     report.setContainerState(containerState);
+    report.setNodeHttpAddress(nodeHttpAddress);
     return report;
   }
 
@@ -199,4 +202,16 @@ public abstract class ContainerReport {
   @Unstable
   public abstract void setContainerExitStatus(int containerExitStatus);
 
+  /**
+   * Get the Node Http address of the container
+   * 
+   * @return the node http address of the container
+   */
+  @Public
+  @Unstable
+  public abstract String getNodeHttpAddress();
+
+  @Private
+  @Unstable
+  public abstract void setNodeHttpAddress(String nodeHttpAddress);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 4e29d2f..90706ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -98,6 +98,7 @@ message ContainerReportProto {
   optional string log_url = 8;
   optional int32 container_exit_status = 9;
   optional ContainerStateProto container_state = 10;
+  optional string node_http_address = 11;
 }
 
 enum YarnApplicationStateProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 108ad0b..dd4a949 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -63,7 +63,7 @@ public class ApplicationCLI extends YarnCLI {
     "%30s\t%20s\t%35s\t%35s"
       + System.getProperty("line.separator");
   private static final String CONTAINER_PATTERN = 
-    "%30s\t%20s\t%20s\t%20s\t%20s\t%35s"
+    "%30s\t%20s\t%20s\t%20s\t%20s\t%20s\t%35s"
       + System.getProperty("line.separator");
 
   private static final String APP_TYPE_CMD = "appTypes";
@@ -355,6 +355,9 @@ public class ApplicationCLI extends YarnCLI {
       containerReportStr.println(containerReport.getLogUrl());
       containerReportStr.print("\tHost : ");
       containerReportStr.println(containerReport.getAssignedNode());
+      containerReportStr.print("\tNodeHttpAddress : ");
+      containerReportStr.println(containerReport.getNodeHttpAddress() == null
+          ? "N/A" : containerReport.getNodeHttpAddress());
       containerReportStr.print("\tDiagnostics : ");
       containerReportStr.print(containerReport.getDiagnosticsInfo());
     } else {
@@ -595,7 +598,7 @@ public class ApplicationCLI extends YarnCLI {
         .getContainers(ConverterUtils.toApplicationAttemptId(appAttemptId));
     writer.println("Total number of containers " + ":" + appsReport.size());
     writer.printf(CONTAINER_PATTERN, "Container-Id", "Start Time",
-        "Finish Time", "State", "Host", "LOG-URL");
+        "Finish Time", "State", "Host", "Node Http Address", "LOG-URL");
     for (ContainerReport containerReport : appsReport) {
       writer.printf(
           CONTAINER_PATTERN,
@@ -603,7 +606,9 @@ public class ApplicationCLI extends YarnCLI {
           Times.format(containerReport.getCreationTime()),
           Times.format(containerReport.getFinishTime()),      
           containerReport.getContainerState(), containerReport
-              .getAssignedNode(), containerReport.getLogUrl());
+              .getAssignedNode(), containerReport.getNodeHttpAddress() == null
+                  ? "N/A" : containerReport.getNodeHttpAddress(),
+          containerReport.getLogUrl());
     }
     writer.flush();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 782bc43..f468bc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -77,7 +77,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
-import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -715,7 +714,8 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
     public ContainerReport createFakeContainerReport() {
       return ContainerReport.newInstance(createFakeContainerId(), null,
           NodeId.newInstance("localhost", 0), null, 1000l, 1200l, "", "", 0,
-          ContainerState.COMPLETE);
+          ContainerState.COMPLETE,
+          "http://" + NodeId.newInstance("localhost", 0).toString());
     }
 
     public List<ContainerReport> createFakeContainerReports() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
index a88189e..c3e3c41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
@@ -371,14 +371,16 @@ public class TestAHSClient {
           ContainerReport.newInstance(
             ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1),
             null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234,
-            5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+            5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
+            "http://" + NodeId.newInstance("host", 2345).toString());
       containerReports.add(container);
 
       ContainerReport container1 =
           ContainerReport.newInstance(
             ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2),
             null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234,
-            5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+            5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
+            "http://" + NodeId.newInstance("host", 2345).toString());
       containerReports.add(container1);
       containers.put(attempt.getApplicationAttemptId(), containerReports);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 9946506..de669f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -117,7 +117,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.UTCClock;
-import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -613,13 +612,15 @@ public class TestYarnClient {
       ContainerReport container = ContainerReport.newInstance(
           ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null,
           NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
-          "diagnosticInfo", "logURL", 0, ContainerState.RUNNING);
+          "diagnosticInfo", "logURL", 0, ContainerState.RUNNING,
+          "http://" + NodeId.newInstance("host", 2345).toString());
       containerReports.add(container);
 
       ContainerReport container1 = ContainerReport.newInstance(
           ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null,
           NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
-          "diagnosticInfo", "logURL", 0, ContainerState.RUNNING);
+          "diagnosticInfo", "logURL", 0, ContainerState.RUNNING,
+          "http://" + NodeId.newInstance("host", 2345).toString());
       containerReports.add(container1);
       containers.put(attempt.getApplicationAttemptId(), containerReports);
       
@@ -630,18 +631,21 @@ public class TestYarnClient {
       container = ContainerReport.newInstance(
           ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null,
           NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
-          "diagnosticInfo", "logURL", 0, null);
+          "diagnosticInfo", "logURL", 0, null,
+          "http://" + NodeId.newInstance("host", 2345).toString());
       containerReportsForAHS.add(container);
 
       container1 = ContainerReport.newInstance(
           ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null,
           NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
-          "diagnosticInfo", "HSlogURL", 0, null);
+          "diagnosticInfo", "HSlogURL", 0, null,
+          "http://" + NodeId.newInstance("host", 2345).toString());
       containerReportsForAHS.add(container1);
       ContainerReport container2 = ContainerReport.newInstance(
           ContainerId.newContainerId(attempt.getApplicationAttemptId(),3), null,
           NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
-          "diagnosticInfo", "HSlogURL", 0, ContainerState.COMPLETE);
+          "diagnosticInfo", "HSlogURL", 0, ContainerState.COMPLETE,
+          "http://" + NodeId.newInstance("host", 2345).toString());
       containerReportsForAHS.add(container2);
       containersFromAHS.put(attempt.getApplicationAttemptId(), containerReportsForAHS);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 088969f..4b60c52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -224,7 +224,8 @@ public class TestYarnCLI {
     ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
     ContainerReport container = ContainerReport.newInstance(containerId, null,
         NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
-        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
+        "http://" + NodeId.newInstance("host", 2345).toString());
     when(client.getContainerReport(any(ContainerId.class))).thenReturn(
         container);
     int result = cli.run(new String[] { "container", "-status",
@@ -240,6 +241,7 @@ public class TestYarnCLI {
     pw.println("\tState : COMPLETE");
     pw.println("\tLOG-URL : logURL");
     pw.println("\tHost : host:1234");
+    pw.println("\tNodeHttpAddress : http://host:2345");
     pw.println("\tDiagnostics : diagnosticInfo");
     pw.close();
     String appReportStr = baos.toString("UTF-8");
@@ -259,13 +261,16 @@ public class TestYarnCLI {
     long time1=1234,time2=5678;
     ContainerReport container = ContainerReport.newInstance(containerId, null,
         NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1, time2,
-        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
+        "http://" + NodeId.newInstance("host", 2345).toString());
     ContainerReport container1 = ContainerReport.newInstance(containerId1, null,
         NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1, time2,
-        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
+        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
+        "http://" + NodeId.newInstance("host", 2345).toString());
     ContainerReport container2 = ContainerReport.newInstance(containerId2, null,
         NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1,0,
-        "diagnosticInfo", "", 0, ContainerState.RUNNING);
+        "diagnosticInfo", "", 0, ContainerState.RUNNING,
+        "http://" + NodeId.newInstance("host", 2345).toString());
     List<ContainerReport> reports = new ArrayList<ContainerReport>();
     reports.add(container);
     reports.add(container1);
@@ -273,6 +278,7 @@ public class TestYarnCLI {
     DateFormat dateFormat=new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy");
     when(client.getContainers(any(ApplicationAttemptId.class))).thenReturn(
         reports);
+    sysOutStream.reset();
     int result = cli.run(new String[] { "container", "-list",
         attemptId.toString() });
     assertEquals(0, result);
@@ -285,24 +291,28 @@ public class TestYarnCLI {
     pw.print("\t         Finish Time");
     pw.print("\t               State");
     pw.print("\t                Host");
+    pw.print("\t   Node Http Address");
     pw.println("\t                            LOG-URL");
     pw.print(" container_1234_0005_01_000001");
     pw.print("\t"+dateFormat.format(new Date(time1)));
     pw.print("\t"+dateFormat.format(new Date(time2)));
     pw.print("\t            COMPLETE");
     pw.print("\t           host:1234");
+    pw.print("\t    http://host:2345");
     pw.println("\t                             logURL");
     pw.print(" container_1234_0005_01_000002");
     pw.print("\t"+dateFormat.format(new Date(time1)));
     pw.print("\t"+dateFormat.format(new Date(time2)));
     pw.print("\t            COMPLETE");
     pw.print("\t           host:1234");
+    pw.print("\t    http://host:2345");
     pw.println("\t                             logURL");
     pw.print(" container_1234_0005_01_000003");
     pw.print("\t"+dateFormat.format(new Date(time1)));
     pw.print("\t                 N/A");
     pw.print("\t             RUNNING");
     pw.print("\t           host:1234");
+    pw.print("\t    http://host:2345");
     pw.println("\t                                   ");
     pw.close();
     String appReportStr = baos.toString("UTF-8");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
index 18c452f..1f0405f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
@@ -338,4 +338,23 @@ public class ContainerReportPBImpl extends ContainerReport {
       ContainerStateProto containerState) {
     return ProtoUtils.convertFromProtoFormat(containerState);
   }
+
+  @Override
+  public String getNodeHttpAddress() {
+    ContainerReportProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasNodeHttpAddress()) {
+      return null;
+    }
+    return (p.getNodeHttpAddress());
+  }
+
+  @Override
+  public void setNodeHttpAddress(String nodeHttpAddress) {
+    maybeInitBuilder();
+    if (nodeHttpAddress == null) {
+      builder.clearNodeHttpAddress();
+      return;
+    }
+    builder.setNodeHttpAddress(nodeHttpAddress);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
index 803dc01..c7cf07b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
@@ -215,7 +215,7 @@ public class ApplicationHistoryManagerImpl extends AbstractService implements
       containerHistory.getStartTime(), containerHistory.getFinishTime(),
       containerHistory.getDiagnosticsInfo(), logUrl,
       containerHistory.getContainerExitStatus(),
-      containerHistory.getContainerState());
+      containerHistory.getContainerState(), null);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 22418a8..1010f62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -415,6 +415,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
     String diagnosticsInfo = null;
     int exitStatus = ContainerExitStatus.INVALID;
     ContainerState state = null;
+    String nodeHttpAddress = null;
     Map<String, Object> entityInfo = entity.getOtherInfo();
     if (entityInfo != null) {
       if (entityInfo
@@ -444,6 +445,12 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         allocatedPriority = (Integer) entityInfo.get(
                 ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO);
       }
+      if (entityInfo.containsKey(
+          ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_ENTITY_INFO)) {
+        nodeHttpAddress =
+            (String) entityInfo
+              .get(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_ENTITY_INFO);
+      }
     }
     List<TimelineEvent> events = entity.getEvents();
     if (events != null) {
@@ -493,7 +500,8 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         Resource.newInstance(allocatedMem, allocatedVcore),
         NodeId.newInstance(allocatedHost, allocatedPort),
         Priority.newInstance(allocatedPriority),
-        createdTime, finishedTime, diagnosticsInfo, logUrl, exitStatus, state);
+        createdTime, finishedTime, diagnosticsInfo, logUrl, exitStatus, state,
+        nodeHttpAddress);
   }
 
   private ApplicationReportExt generateApplicationReport(TimelineEntity entity,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
index 8791da4..0d5540d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java
@@ -57,4 +57,6 @@ public class ContainerMetricsConstants {
   public static final String STATE_EVENT_INFO =
       "YARN_CONTAINER_STATE";
 
+  public static final String ALLOCATED_HOST_HTTP_ADDRESS_ENTITY_INFO =
+      "YARN_CONTAINER_ALLOCATED_HOST_HTTP_ADDRESS";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index 1bba4d8..4a82c93 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -199,12 +199,14 @@ public class AppAttemptBlock extends HtmlBlock {
         .append(url("container", container.getContainerId()))
         .append("'>")
         .append(container.getContainerId())
-        .append("</a>\",\"<a href='")
-        .append("#") // TODO: replace with node http address (YARN-1884)
+        .append("</a>\",\"<a ")
+        .append(
+          container.getNodeHttpAddress() == null ? "#" : "href='"
+              + container.getNodeHttpAddress())
         .append("'>")
-        .append(container.getAssignedNodeId() == null ? "N/A" :
+        .append(container.getNodeHttpAddress() == null ? "N/A" :
             StringEscapeUtils.escapeJavaScript(StringEscapeUtils
-                .escapeHtml(container.getAssignedNodeId())))
+                .escapeHtml(container.getNodeHttpAddress())))
         .append("</a>\",\"")
         .append(container.getContainerExitStatus()).append("\",\"<a href='")
         .append(container.getLogUrl() == null ?
@@ -271,4 +273,4 @@ public class AppAttemptBlock extends HtmlBlock {
     }
     return false;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index d4c20d4..5fc5fa0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -285,15 +285,12 @@ public class AppBlock extends HtmlBlock {
       }
       long startTime = 0L;
       String logsLink = null;
+      String nodeLink = null;
       if (containerReport != null) {
         ContainerInfo container = new ContainerInfo(containerReport);
         startTime = container.getStartedTime();
         logsLink = containerReport.getLogUrl();
-      }
-      String nodeLink = null;
-      if (appAttempt.getHost() != null && appAttempt.getRpcPort() >= 0
-          && appAttempt.getRpcPort() < 65536) {
-        nodeLink = appAttempt.getHost() + ":" + appAttempt.getRpcPort();
+        nodeLink = containerReport.getNodeHttpAddress();
       }
       // AppAttemptID numerical value parsed by parseHadoopID in
       // yarn.dt.plugins.js
@@ -304,8 +301,8 @@ public class AppBlock extends HtmlBlock {
         .append(appAttempt.getAppAttemptId())
         .append("</a>\",\"")
         .append(startTime)
-        .append("\",\"<a href='")
-        .append("#") // TODO: replace with node http address (YARN-1884)
+        .append("\",\"<a ")
+        .append(nodeLink == null ? "#" : "href='" + nodeLink)
         .append("'>")
         .append(nodeLink == null ? "N/A" : StringEscapeUtils
             .escapeJavaScript(StringEscapeUtils.escapeHtml(nodeLink)))

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
index ed50c7a..cae8d2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
@@ -104,7 +104,12 @@ public class ContainerBlock extends HtmlBlock {
         container.getContainerState() == null ? UNAVAILABLE : container
           .getContainerState())
       ._("Exit Status:", container.getContainerExitStatus())
-      ._("Node:", container.getAssignedNodeId())
+      ._(
+        "Node:",
+        container.getNodeHttpAddress() == null ? "#" : container
+          .getNodeHttpAddress(),
+        container.getNodeHttpAddress() == null ? "N/A" : container
+          .getNodeHttpAddress())
       ._("Priority:", container.getPriority())
       ._("Started:", Times.format(container.getStartedTime()))
       ._(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
index bdcc7b2..0d18e7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
@@ -42,6 +42,7 @@ public class ContainerInfo {
   protected String logUrl;
   protected int containerExitStatus;
   protected ContainerState containerState;
+  protected String nodeHttpAddress;
 
   public ContainerInfo() {
     // JAXB needs this
@@ -64,6 +65,7 @@ public class ContainerInfo {
     logUrl = container.getLogUrl();
     containerExitStatus = container.getContainerExitStatus();
     containerState = container.getContainerState();
+    nodeHttpAddress = container.getNodeHttpAddress();
   }
 
   public String getContainerId() {
@@ -114,4 +116,7 @@ public class ContainerInfo {
     return containerState;
   }
 
+  public String getNodeHttpAddress() {
+    return nodeHttpAddress;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ContainerCreatedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ContainerCreatedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ContainerCreatedEvent.java
index eeda181..05b6781 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ContainerCreatedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ContainerCreatedEvent.java
@@ -29,18 +29,21 @@ public class ContainerCreatedEvent extends SystemMetricsEvent {
   private Resource allocatedResource;
   private NodeId allocatedNode;
   private Priority allocatedPriority;
+  private String nodeHttpAddress;
 
   public ContainerCreatedEvent(
       ContainerId containerId,
       Resource allocatedResource,
       NodeId allocatedNode,
       Priority allocatedPriority,
-      long createdTime) {
+      long createdTime,
+      String nodeHttpAddress) {
     super(SystemMetricsEventType.CONTAINER_CREATED, createdTime);
     this.containerId = containerId;
     this.allocatedResource = allocatedResource;
     this.allocatedNode = allocatedNode;
     this.allocatedPriority = allocatedPriority;
+    this.nodeHttpAddress = nodeHttpAddress;
   }
 
   @Override
@@ -64,4 +67,7 @@ public class ContainerCreatedEvent extends SystemMetricsEvent {
     return allocatedPriority;
   }
 
+  public String getNodeHttpAddress() {
+    return nodeHttpAddress;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
index 3adf519..b849b00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
@@ -181,7 +181,7 @@ public class SystemMetricsPublisher extends CompositeService {
               container.getAllocatedResource(),
               container.getAllocatedNode(),
               container.getAllocatedPriority(),
-              createdTime));
+              createdTime, container.getNodeHttpAddress()));
     }
   }
 
@@ -388,6 +388,9 @@ public class SystemMetricsPublisher extends CompositeService {
         event.getAllocatedNode().getPort());
     entityInfo.put(ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO,
         event.getAllocatedPriority().getPriority());
+    entityInfo.put(
+      ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_ENTITY_INFO,
+      event.getNodeHttpAddress());
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setEventType(ContainerMetricsConstants.CREATED_EVENT_TYPE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
index 9e9dcb9..20087f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
@@ -79,4 +79,5 @@ public interface RMContainer extends EventHandler<RMContainerEvent> {
   
   List<ResourceRequest> getResourceRequests();
 
+  String getNodeHttpAddress();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index e37d8fd..38a03ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
@@ -573,10 +572,29 @@ public class RMContainerImpl implements RMContainer {
           this.getAllocatedResource(), this.getAllocatedNode(),
           this.getAllocatedPriority(), this.getCreationTime(),
           this.getFinishTime(), this.getDiagnosticsInfo(), this.getLogURL(),
-          this.getContainerExitStatus(), this.getContainerState());
+          this.getContainerExitStatus(), this.getContainerState(),
+          this.getNodeHttpAddress());
     } finally {
       this.readLock.unlock();
     }
     return containerReport;
   }
+
+  @Override
+  public String getNodeHttpAddress() {
+    try {
+      readLock.lock();
+      if (container.getNodeHttpAddress() != null) {
+        StringBuilder httpAddress = new StringBuilder();
+        httpAddress.append(WebAppUtils.getHttpSchemePrefix(rmContext
+            .getYarnConfiguration()));
+        httpAddress.append(container.getNodeHttpAddress());
+        return httpAddress.toString();
+      } else {
+        return null;
+      }
+    } finally {
+      readLock.unlock();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85f6d67f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index 9f02721..7ed3835 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
 import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
@@ -386,6 +385,10 @@ public class TestSystemMetricsPublisher {
     when(container.getDiagnosticsInfo()).thenReturn("test diagnostics info");
     when(container.getContainerExitStatus()).thenReturn(-1);
     when(container.getContainerState()).thenReturn(ContainerState.COMPLETE);
+    Container mockContainer = mock(Container.class);
+    when(container.getContainer()).thenReturn(mockContainer);
+    when(mockContainer.getNodeHttpAddress())
+      .thenReturn("http://localhost:1234");
     return container;
   }
 


[08/49] hadoop git commit: HDFS-7818. OffsetParam should return the default value instead of throwing NPE when the value is unspecified. Contributed by Eric Payne.

Posted by zj...@apache.org.
HDFS-7818. OffsetParam should return the default value instead of throwing NPE when the value is unspecified. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7971030
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7971030
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7971030

Branch: refs/heads/YARN-2928
Commit: c79710302ee51e1a9ee17dadb161c69bb3aba5c9
Parents: 21101c0
Author: Haohui Mai <wh...@apache.org>
Authored: Fri Mar 6 14:26:23 2015 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Fri Mar 6 14:26:23 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  3 +++
 .../datanode/web/webhdfs/ParameterParser.java    |  2 +-
 .../hadoop/hdfs/web/resources/OffsetParam.java   |  5 +++++
 .../web/webhdfs/TestParameterParser.java         | 19 +++++++++++++++++++
 4 files changed, 28 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
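
For reference, a minimal sketch of the new default behavior; the literal offsets are illustrative:

  import org.apache.hadoop.hdfs.web.resources.OffsetParam;

  public class OffsetParamExample {
    public static void main(String[] args) {
      // An explicit offset is parsed as before.
      long explicit = new OffsetParam("42").getOffset();           // 42
      // An unspecified offset now defaults to 0 instead of causing an NPE.
      long defaulted = new OffsetParam((String) null).getOffset(); // 0
      System.out.println(explicit + " " + defaulted);
    }
  }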


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7971030/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e622a57..b443902 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1107,6 +1107,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7885. Datanode should not trust the generation stamp provided by
     client. (Tsz Wo Nicholas Sze via jing9)
 
+    HDFS-7818. OffsetParam should return the default value instead of throwing
+    NPE when the value is unspecified. (Eric Payne via wheat9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7971030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index 5749504..2baafe8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -62,7 +62,7 @@ class ParameterParser {
   }
 
   long offset() {
-    return new OffsetParam(param(OffsetParam.NAME)).getValue();
+    return new OffsetParam(param(OffsetParam.NAME)).getOffset();
   }
 
   String namenodeId() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7971030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
index 6973787..6d88703 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
@@ -46,4 +46,9 @@ public class OffsetParam extends LongParam {
   public String getName() {
     return NAME;
   }
+
+  public Long getOffset() {
+    Long offset = getValue();
+    return (offset == null) ? Long.valueOf(0) : offset;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7971030/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 6a6c5d0..8aee1d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
@@ -65,4 +66,22 @@ public class TestParameterParser {
     ParameterParser testParser = new ParameterParser(decoder, conf);
     Assert.assertEquals(EXPECTED_PATH, testParser.path());
   }
+
+  @Test
+  public void testOffset() throws IOException {
+    final long X = 42;
+
+    long offset = new OffsetParam(Long.toString(X)).getOffset();
+    Assert.assertEquals("OffsetParam: ", X, offset);
+
+    offset = new OffsetParam((String) null).getOffset();
+    Assert.assertEquals("OffsetParam with null should have defaulted to 0", 0, offset);
+
+    try {
+      offset = new OffsetParam("abc").getValue();
+      Assert.fail("OffsetParam with nondigit value should have thrown IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {
+      // Ignore
+    }
+  }
 }
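
For illustration, a minimal sketch of the contract this change introduces: getOffset() falls back to 0 when the "offset" query parameter was never supplied, while getValue() still reports null. Only OffsetParam and its constructors from the diff above are used; the wrapper class and main method are made up for the example.

    import org.apache.hadoop.hdfs.web.resources.OffsetParam;

    // Illustrative only: demonstrates the null-handling added by HDFS-7818.
    public class OffsetParamDefaultSketch {
      public static void main(String[] args) {
        // An unspecified offset used to surface as a null Long from getValue(),
        // which callers such as ParameterParser#offset() then unboxed into an NPE.
        OffsetParam unspecified = new OffsetParam((String) null);
        System.out.println("getValue()  -> " + unspecified.getValue());   // null
        System.out.println("getOffset() -> " + unspecified.getOffset());  // 0

        // An explicit value passes through unchanged.
        OffsetParam explicit = new OffsetParam(Long.toString(42L));
        System.out.println("getOffset() -> " + explicit.getOffset());     // 42
      }
    }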


[21/49] hadoop git commit: HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo Zhang.

Posted by zj...@apache.org.
HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN. Contributed by Duo Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42e3a805
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42e3a805
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42e3a805

Branch: refs/heads/YARN-2928
Commit: 42e3a805117ff7cb054c2442f7b0e0cc54be63ad
Parents: e43882e
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Mar 9 11:07:40 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Mar 9 11:07:40 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/minikdc/MiniKdc.java | 17 ++++++-----
 .../minikdc/TestChangeOrgNameAndDomain.java     | 32 ++++++++++++++++++++
 3 files changed, 45 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e3a805/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6f2c8c3..37604c4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1067,6 +1067,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
 
+    HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
+    (Duo Zhang via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e3a805/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
index a649bd2..9388360 100644
--- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
+++ b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
@@ -36,6 +36,7 @@ import org.apache.directory.server.core.kerberos.KeyDerivationInterceptor;
 import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmIndex;
 import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
 import org.apache.directory.server.core.partition.ldif.LdifPartition;
+import org.apache.directory.server.kerberos.KerberosConfig;
 import org.apache.directory.server.kerberos.kdc.KdcServer;
 import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory;
 import org.apache.directory.server.kerberos.shared.keytab.Keytab;
@@ -418,7 +419,15 @@ public class MiniKdc {
       IOUtils.closeQuietly(is1);
     }
 
-    kdc = new KdcServer();
+    KerberosConfig kerberosConfig = new KerberosConfig();
+    kerberosConfig.setMaximumRenewableLifetime(Long.parseLong(conf
+        .getProperty(MAX_RENEWABLE_LIFETIME)));
+    kerberosConfig.setMaximumTicketLifetime(Long.parseLong(conf
+        .getProperty(MAX_TICKET_LIFETIME)));
+    kerberosConfig.setSearchBaseDn(String.format("dc=%s,dc=%s", orgName,
+        orgDomain));
+    kerberosConfig.setPaEncTimestampRequired(false);
+    kdc = new KdcServer(kerberosConfig);
     kdc.setDirectoryService(ds);
 
     // transport
@@ -431,12 +440,6 @@ public class MiniKdc {
       throw new IllegalArgumentException("Invalid transport: " + transport);
     }
     kdc.setServiceName(conf.getProperty(INSTANCE));
-    kdc.getConfig().setMaximumRenewableLifetime(
-            Long.parseLong(conf.getProperty(MAX_RENEWABLE_LIFETIME)));
-    kdc.getConfig().setMaximumTicketLifetime(
-            Long.parseLong(conf.getProperty(MAX_TICKET_LIFETIME)));
-
-    kdc.getConfig().setPaEncTimestampRequired(false);
     kdc.start();
 
     StringBuilder sb = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e3a805/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
new file mode 100644
index 0000000..3843130
--- /dev/null
+++ b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.minikdc;
+
+import java.util.Properties;
+
+public class TestChangeOrgNameAndDomain extends TestMiniKdc {
+
+  @Override
+  public void createMiniKdcConf() {
+    super.createMiniKdcConf();
+    Properties properties = getConf();
+    properties.setProperty(MiniKdc.ORG_NAME, "APACHE");
+    properties.setProperty(MiniKdc.ORG_DOMAIN, "COM");
+  }
+
+}
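
For illustration, a minimal sketch of what the new test enables, assuming the usual MiniKdc.createConf() / MiniKdc(Properties, File) API and the getRealm() accessor; the work-directory handling is only illustrative. With the fix, the ORG_NAME and ORG_DOMAIN properties now reach the KerberosConfig search base DN instead of being silently ignored.

    import java.io.File;
    import java.util.Properties;

    import org.apache.hadoop.minikdc.MiniKdc;

    // A sketch, not the project's test: starts a MiniKdc under a custom realm.
    public class CustomRealmKdcSketch {
      public static void main(String[] args) throws Exception {
        Properties conf = MiniKdc.createConf();
        // These two overrides are exactly what TestChangeOrgNameAndDomain applies.
        conf.setProperty(MiniKdc.ORG_NAME, "APACHE");
        conf.setProperty(MiniKdc.ORG_DOMAIN, "COM");

        File workDir = new File(System.getProperty("java.io.tmpdir"), "minikdc-work");
        workDir.mkdirs();

        MiniKdc kdc = new MiniKdc(conf, workDir);
        kdc.start();
        try {
          // The realm is derived from ORG_NAME.ORG_DOMAIN, e.g. APACHE.COM.
          System.out.println("Realm: " + kdc.getRealm());
        } finally {
          kdc.stop();
        }
      }
    }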


[36/49] hadoop git commit: YARN-2280. Resource manager web service fields are not accessible (Krisztian Horvath via aw)

Posted by zj...@apache.org.
YARN-2280. Resource manager web service fields are not accessible (Krisztian Horvath via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5cf985b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5cf985b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5cf985b

Branch: refs/heads/YARN-2928
Commit: a5cf985bf501fd032124d121dcae80538db9e380
Parents: 64eb068
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Mar 10 16:32:20 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Mar 10 16:32:20 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java  | 3 +++
 .../yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java | 3 +++
 3 files changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5cf985b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 82134db..d882f3e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -26,6 +26,9 @@ Trunk - Unreleased
     YARN-3199. Fair Scheduler documentation improvements (Rohit Agarwal via
     aw)
 
+    YARN-2280. Resource manager web service fields are not accessible
+    (Krisztian Horvath via aw)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5cf985b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
index 7be9a6f..7dacd10 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
@@ -36,4 +36,7 @@ public class NodesInfo {
     node.add(nodeinfo);
   }
 
+  public ArrayList<NodeInfo> getNodes() {
+    return node;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5cf985b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
index 34078f1..22018d0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
@@ -34,4 +34,7 @@ public class SchedulerTypeInfo {
     this.schedulerInfo = scheduler;
   }
 
+  public SchedulerInfo getSchedulerInfo() {
+    return schedulerInfo;
+  }
 }
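
For illustration, a short sketch of why the accessors matter to callers outside the webapp.dao package, for example code that unmarshals the RM web service responses into these classes. getNodes() and getSchedulerInfo() come from the diff above; NodeInfo#getNodeId() is assumed to be an existing getter on the wrapped type.

    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;

    // Illustrative only: before this change the wrapped fields had no public
    // accessors, so client-side code could not read them after unmarshalling.
    public class RmDaoAccessSketch {

      static void printNodes(NodesInfo nodes) {
        for (NodeInfo node : nodes.getNodes()) {     // accessor added by the diff
          System.out.println(node.getNodeId());      // assumed existing getter
        }
      }

      static void printScheduler(SchedulerTypeInfo type) {
        System.out.println(type.getSchedulerInfo()); // accessor added by the diff
      }
    }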


[13/49] hadoop git commit: YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable resource monitoring. Contributed by Hitesh Shah

Posted by zj...@apache.org.
YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable resource monitoring. Contributed by Hitesh Shah


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ce3c763
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ce3c763
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ce3c763

Branch: refs/heads/YARN-2928
Commit: 7ce3c7635392c32f0504191ddd8417fb20509caa
Parents: 6444349
Author: Junping Du <ju...@apache.org>
Authored: Sun Mar 8 14:47:35 2015 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Sun Mar 8 14:47:35 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../util/ResourceCalculatorProcessTree.java     | 25 ++++++++++++++------
 2 files changed, 21 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ce3c763/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 250fc1c..f28e932 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -366,6 +366,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2190. Added CPU and memory limit options to the default container
     executor for Windows containers. (Chuan Liu via jianhe)
 
+    YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable
+    resource monitoring. (Hitesh Shah via junping_du)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ce3c763/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 8c22c9e..6ee8834 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -22,7 +22,8 @@ import java.lang.reflect.Constructor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 
@@ -30,7 +31,8 @@ import org.apache.hadoop.conf.Configured;
  * Interface class to obtain process resource usage
  *
  */
-@Private
+@Public
+@Evolving
 public abstract class ResourceCalculatorProcessTree extends Configured {
   static final Log LOG = LogFactory
       .getLog(ResourceCalculatorProcessTree.class);
@@ -90,9 +92,12 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * @param olderThanAge processes above this age are included in the
    *                      memory addition
    * @return cumulative virtual memory used by the process-tree in bytes,
-   *          for processes older than this age.
+   *          for processes older than this age. return 0 if it cannot be
+   *          calculated
    */
-  public abstract long getCumulativeVmem(int olderThanAge);
+  public long getCumulativeVmem(int olderThanAge) {
+    return 0;
+  }
 
   /**
    * Get the cumulative resident set size (rss) memory used by all the processes
@@ -104,7 +109,9 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    *          for processes older than this age. return 0 if it cannot be
    *          calculated
    */
-  public abstract long getCumulativeRssmem(int olderThanAge);
+  public long getCumulativeRssmem(int olderThanAge) {
+    return 0;
+  }
 
   /**
    * Get the CPU time in millisecond used by all the processes in the
@@ -113,7 +120,9 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * @return cumulative CPU time in millisecond since the process-tree created
    *         return 0 if it cannot be calculated
    */
-  public abstract long getCumulativeCpuTime();
+  public long getCumulativeCpuTime() {
+    return 0;
+  }
 
   /**
    * Get the CPU usage by all the processes in the process-tree based on
@@ -123,7 +132,9 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * @return percentage CPU usage since the process-tree was created
    *         return {@link CpuTimeTracker#UNAVAILABLE} if it cannot be calculated
    */
-  public abstract float getCpuUsagePercent();
+  public float getCpuUsagePercent() {
+    return -1;
+  }
 
   /** Verify that the tree process id is same as its process group id.
    * @return true if the process id matches else return false.
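
For illustration, a minimal sketch of how an external monitor can now consume this class, assuming the existing static factory ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(pid, clazz, conf) and updateProcessTree(); the accessors called below are the ones shown in the diff. Since the class is now @Public/@Evolving and these methods are no longer abstract, a custom subclass configured for resource monitoring only has to override the metrics it can actually measure.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

    // A sketch under the assumptions above; not the project's monitoring code.
    public class ProcessTreeMonitorSketch {
      public static void main(String[] args) {
        String pid = args.length > 0 ? args[0] : "1";
        Configuration conf = new Configuration();

        // Passing null for the class selects the platform default implementation;
        // the factory returns null when none is available.
        ResourceCalculatorProcessTree tree =
            ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(pid, null, conf);
        if (tree == null) {
          System.out.println("No process-tree implementation on this platform");
          return;
        }

        tree.updateProcessTree();
        System.out.println("vmem bytes : " + tree.getCumulativeVmem(0));
        System.out.println("rss bytes  : " + tree.getCumulativeRssmem(0));
        System.out.println("cpu millis : " + tree.getCumulativeCpuTime());
        System.out.println("cpu percent: " + tree.getCpuUsagePercent());
      }
    }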


[41/49] hadoop git commit: HADOOP-11693. Azure Storage FileSystem rename operations are throttled too aggressively to complete HBase WAL archiving. Contributed by Duo Xu.

Posted by zj...@apache.org.
HADOOP-11693. Azure Storage FileSystem rename operations are throttled too aggressively to complete HBase WAL archiving. Contributed by Duo Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a346bcb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a346bcb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a346bcb

Branch: refs/heads/YARN-2928
Commit: 7a346bcb4fa5b56191ed00a39e72e51c9bdf1b56
Parents: fb34f45
Author: cnauroth <cn...@apache.org>
Authored: Wed Mar 11 14:36:51 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Wed Mar 11 14:36:51 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../fs/azure/AzureNativeFileSystemStore.java    | 55 ++++++++++++++++++--
 .../hadoop/fs/azure/StorageInterface.java       |  6 ++-
 .../hadoop/fs/azure/StorageInterfaceImpl.java   |  4 +-
 .../hadoop/fs/azure/MockStorageInterface.java   |  2 +-
 5 files changed, 63 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a346bcb/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5bca61f..fb699bc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1088,6 +1088,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11618. DelegateToFileSystem erroneously uses default FS's port in
     constructor. (Brahma Reddy Battula via gera)
 
+    HADOOP-11693. Azure Storage FileSystem rename operations are throttled too
+    aggressively to complete HBase WAL archiving. (Duo Xu via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a346bcb/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index b664fe7..c6ba84f 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -134,6 +134,15 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private static final String KEY_MAX_BACKOFF_INTERVAL = "fs.azure.io.retry.max.backoff.interval";
   private static final String KEY_BACKOFF_INTERVAL = "fs.azure.io.retry.backoff.interval";
   private static final String KEY_MAX_IO_RETRIES = "fs.azure.io.retry.max.retries";
+  
+  private static final String KEY_COPYBLOB_MIN_BACKOFF_INTERVAL = 
+    "fs.azure.io.copyblob.retry.min.backoff.interval";
+  private static final String KEY_COPYBLOB_MAX_BACKOFF_INTERVAL = 
+    "fs.azure.io.copyblob.retry.max.backoff.interval";
+  private static final String KEY_COPYBLOB_BACKOFF_INTERVAL = 
+    "fs.azure.io.copyblob.retry.backoff.interval";
+  private static final String KEY_COPYBLOB_MAX_IO_RETRIES = 
+    "fs.azure.io.copyblob.retry.max.retries";  
 
   private static final String KEY_SELF_THROTTLE_ENABLE = "fs.azure.selfthrottling.enable";
   private static final String KEY_SELF_THROTTLE_READ_FACTOR = "fs.azure.selfthrottling.read.factor";
@@ -199,6 +208,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private static final int DEFAULT_MAX_BACKOFF_INTERVAL = 30 * 1000; // 30s
   private static final int DEFAULT_BACKOFF_INTERVAL = 1 * 1000; // 1s
   private static final int DEFAULT_MAX_RETRY_ATTEMPTS = 15;
+  
+  private static final int DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL = 3  * 1000;
+  private static final int DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL = 90 * 1000;
+  private static final int DEFAULT_COPYBLOB_BACKOFF_INTERVAL = 30 * 1000;
+  private static final int DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS = 15;  
 
   // Self-throttling defaults. Allowed range = (0,1.0]
   // Value of 1.0 means no self-throttling.
@@ -2435,11 +2449,46 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       // Rename the source blob to the destination blob by copying it to
       // the destination blob then deleting it.
       //
-      dstBlob.startCopyFromBlob(srcUri, getInstrumentedContext());
+      // Copy blob operation in Azure storage is very costly. It will be highly
+      // likely throttled during Azure storage gc. Short term fix will be using
+      // a more intensive exponential retry policy when the cluster is getting 
+      // throttled.
+      try {
+        dstBlob.startCopyFromBlob(srcUri, null, getInstrumentedContext());
+      } catch (StorageException se) {
+        if (se.getErrorCode().equals(
+		  StorageErrorCode.SERVER_BUSY.toString())) {
+          int copyBlobMinBackoff = sessionConfiguration.getInt(
+            KEY_COPYBLOB_MIN_BACKOFF_INTERVAL,
+			DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL);
+
+          int copyBlobMaxBackoff = sessionConfiguration.getInt(
+            KEY_COPYBLOB_MAX_BACKOFF_INTERVAL,
+			DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL);
+
+          int copyBlobDeltaBackoff = sessionConfiguration.getInt(
+            KEY_COPYBLOB_BACKOFF_INTERVAL,
+			DEFAULT_COPYBLOB_BACKOFF_INTERVAL);
+
+          int copyBlobMaxRetries = sessionConfiguration.getInt(
+            KEY_COPYBLOB_MAX_IO_RETRIES,
+			DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS);
+	        
+          BlobRequestOptions options = new BlobRequestOptions();
+          options.setRetryPolicyFactory(new RetryExponentialRetry(
+            copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff, 
+			copyBlobMaxRetries));
+          dstBlob.startCopyFromBlob(srcUri, options, getInstrumentedContext());
+        } else {
+          throw se;
+        }
+      }
       waitForCopyToComplete(dstBlob, getInstrumentedContext());
-
       safeDelete(srcBlob, lease);
-    } catch (Exception e) {
+    } catch (StorageException e) {
+      // Re-throw exception as an Azure storage exception.
+      throw new AzureException(e);
+    } catch (URISyntaxException e) {
       // Re-throw exception as an Azure storage exception.
       throw new AzureException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a346bcb/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
index 91928a2..e89151d 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
@@ -383,6 +383,10 @@ abstract class StorageInterface {
      *
      * @param source
      *            A <code>java.net.URI</code> The URI of a source blob.
+     * @param options
+     *            A {@link BlobRequestOptions} object that specifies any additional options for the request. Specifying
+     *            <code>null</code> will use the default request options from the associated service client (
+     *            {@link CloudBlobClient}).
      * @param opContext
      *            An {@link OperationContext} object that represents the context for the current operation. This object
      *            is used to track requests to the storage service, and to provide additional runtime information about
@@ -394,7 +398,7 @@ abstract class StorageInterface {
      *
      */
     public abstract void startCopyFromBlob(URI source,
-        OperationContext opContext)
+        BlobRequestOptions options, OperationContext opContext)
         throws StorageException, URISyntaxException;
     
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a346bcb/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
index 2120536..90d4d88 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
@@ -393,11 +393,11 @@ class StorageInterfaceImpl extends StorageInterface {
     }
 
     @Override
-    public void startCopyFromBlob(URI source,
+    public void startCopyFromBlob(URI source, BlobRequestOptions options,
         OperationContext opContext)
             throws StorageException, URISyntaxException {
       getBlob().startCopyFromBlob(source,
-          null, null, null, opContext);
+          null, null, options, opContext);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a346bcb/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
index c51c05b..cde0e38 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
@@ -429,7 +429,7 @@ public class MockStorageInterface extends StorageInterface {
     }
 
     @Override
-    public void startCopyFromBlob(URI source,
+    public void startCopyFromBlob(URI source, BlobRequestOptions options,
         OperationContext opContext) throws StorageException, URISyntaxException {
       backingStore.copy(convertUriToDecodedString(source), convertUriToDecodedString(uri));
       //TODO: set the backingStore.properties.CopyState and
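
For illustration, a short sketch of tuning the new copy-blob retry settings; the property names and default values are taken from the diff above, while the Configuration-based setup and the numbers themselves are only an example. These settings are consulted only on the retry path, after a copy-blob rename has been rejected with SERVER_BUSY.

    import org.apache.hadoop.conf.Configuration;

    // Illustrative only: configures the more aggressive exponential retry that
    // AzureNativeFileSystemStore now applies when copy-blob is throttled.
    public class AzureCopyBlobRetrySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        conf.setInt("fs.azure.io.copyblob.retry.min.backoff.interval", 3 * 1000);  // default 3s
        conf.setInt("fs.azure.io.copyblob.retry.max.backoff.interval", 90 * 1000); // default 90s
        conf.setInt("fs.azure.io.copyblob.retry.backoff.interval", 30 * 1000);     // default 30s
        conf.setInt("fs.azure.io.copyblob.retry.max.retries", 15);                 // default 15

        // A wasb:// FileSystem created from this Configuration would hand these
        // values to AzureNativeFileSystemStore during initialization.
        System.out.println(conf.get("fs.azure.io.copyblob.retry.max.retries"));
      }
    }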


[32/49] hadoop git commit: Revert "HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P. McCabe)"

Posted by zj...@apache.org.
Revert "HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P.  McCabe)"

This reverts commit 3241fc2b17f11e621d8ffb6160caa4b850c278b6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d5b01e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d5b01e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d5b01e0

Branch: refs/heads/YARN-2928
Commit: 8d5b01e005b2647262861acf522e9b5c6d6f8bba
Parents: 20b8ee1
Author: cnauroth <cn...@apache.org>
Authored: Tue Mar 10 11:02:07 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Mar 10 11:02:07 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt     |  4 ----
 .../hadoop/crypto/random/OpensslSecureRandom.c      | 16 +---------------
 2 files changed, 1 insertion(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5b01e0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7d0cbee..ab58270 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -680,10 +680,6 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0.
     (Shashank Khandelwal and Ivan Mitic via cnauroth)
 
-    HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support
-    FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P.
-    McCabe)
-
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5b01e0/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
index f30ccbe..6c31d10 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
@@ -29,10 +29,6 @@
 #include <sys/types.h>
 #endif
 
-#if defined(__FreeBSD__)
-#include <pthread_np.h>
-#endif
-
 #ifdef WINDOWS
 #include <windows.h>
 #endif
@@ -278,17 +274,7 @@ static void pthreads_locking_callback(int mode, int type, char *file, int line)
 
 static unsigned long pthreads_thread_id(void)
 {
-  unsigned long thread_id = 0;
-#if defined(__linux__)
-  thread_id = (unsigned long)syscall(SYS_gettid);
-#elif defined(__FreeBSD__)
-  thread_id = (unsigned long)pthread_getthreadid_np();
-#elif defined(__sun)
-  thread_id = (unsigned long)pthread_self();
-#else
-#error "Platform not supported"
-#endif
-  return thread_id;
+  return (unsigned long)syscall(SYS_gettid);
 }
 
 #endif /* UNIX */


[31/49] hadoop git commit: MAPREDUCE-4742. Fix typo in nnbench#displayUsage. Contributed by Liang Xie.

Posted by zj...@apache.org.
MAPREDUCE-4742. Fix typo in nnbench#displayUsage. Contributed by Liang Xie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20b8ee13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20b8ee13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20b8ee13

Branch: refs/heads/YARN-2928
Commit: 20b8ee1350e62d1b21c951e653302b6e6a8e4f7e
Parents: d39bc90
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Mar 11 00:41:12 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed Mar 11 00:41:12 2015 +0900

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                             | 2 ++
 .../src/test/java/org/apache/hadoop/hdfs/NNBench.java            | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20b8ee13/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5f33041..eecf022 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -257,6 +257,8 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+    MAPREDUCE-4742. Fix typo in nnbench#displayUsage. (Liang Xie via ozawa)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20b8ee13/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
index bd46e7c..bb50213 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
@@ -182,8 +182,8 @@ public class NNBench {
       "\t-reduces <number of reduces. default is 1. This is not mandatory>\n" +
       "\t-startTime <time to start, given in seconds from the epoch. " +
       "Make sure this is far enough into the future, so all maps " +
-      "(operations) will start at the same time>. " +
-      "default is launch time + 2 mins. This is not mandatory \n" +
+      "(operations) will start at the same time. " +
+      "default is launch time + 2 mins. This is not mandatory>\n" +
       "\t-blockSize <Block size in bytes. default is 1. " + 
       "This is not mandatory>\n" +
       "\t-bytesToWrite <Bytes to write. default is 0. " + 


[33/49] hadoop git commit: YARN-3187. Documentation of Capacity Scheduler Queue mapping based on user or group. Contributed by Gururaj Shetty

Posted by zj...@apache.org.
YARN-3187. Documentation of Capacity Scheduler Queue mapping based on user or group. Contributed by Gururaj Shetty


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a380643d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a380643d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a380643d

Branch: refs/heads/YARN-2928
Commit: a380643d2044a4974e379965f65066df2055d003
Parents: 8d5b01e
Author: Jian He <ji...@apache.org>
Authored: Tue Mar 10 10:54:08 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Mar 10 11:15:57 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../src/site/markdown/CapacityScheduler.md      | 26 ++++++++++++++++++++
 2 files changed, 29 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a380643d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a6dcb29..82134db 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -381,6 +381,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable
     resource monitoring. (Hitesh Shah via junping_du)
 
+    YARN-3187. Documentation of Capacity Scheduler Queue mapping based on user
+    or group. (Gururaj Shetty via jianhe)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a380643d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 3c32cdd..1cb963e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -69,6 +69,8 @@ The `CapacityScheduler` supports the following features:
 
 * **Resource-based Scheduling** - Support for resource-intensive applications, where-in a application can optionally specify higher resource-requirements than the default, there-by accomodating applications with differing resource requirements. Currently, *memory* is the the resource requirement supported.
 
+* **Queue Mapping based on User or Group** - This feature allows users to map a job to a specific queue based on the user or group.
+
 Configuration
 -------------
 
@@ -151,6 +153,30 @@ Configuration
 
 **Note:** An *ACL* is of the form *user1*, *user2spacegroup1*, *group2*. The special value of * implies *anyone*. The special value of *space* implies *no one*. The default is * for the root queue if not specified.
 
+  * Queue Mapping based on User or Group
+
+  The `CapacityScheduler` supports the following parameters to configure the queue mapping based on user or group:
+
+| Property | Description |
+|:---- |:---- |
+| `yarn.scheduler.capacity.queue-mappings` | This configuration specifies the mapping of user or group to a specific queue. You can map a single user or a list of users to queues. Syntax: `[u or g]:[name]:[queue_name][,next_mapping]*`. Here, *u or g* indicates whether the mapping is for a user or group. The value is *u* for user and *g* for group. *name* indicates the user name or group name. To specify the user who has submitted the application, %user can be used. *queue_name* indicates the queue name for which the application has to be mapped. To specify queue name same as user name, *%user* can be used. To specify queue name same as the name of the primary group for which the user belongs to, *%primary_group* can be used.|
+| `yarn.scheduler.capacity.queue-mappings-override.enable` | This function is used to specify whether the user specified queues can be overridden. This is a Boolean value and the default value is *false*. |
+
+Example:
+
+```
+ <property>
+   <name>yarn.scheduler.capacity.queue-mappings</name>
+   <value>u:user1:queue1,g:group1:queue2,u:%user:%user,u:user2:%primary_group</value>
+   <description>
+     Here, <user1> is mapped to <queue1>, <group1> is mapped to <queue2>, 
+     maps users to queues with the same name as user, <user2> is mapped 
+     to queue name same as <primary group> respectively. The mappings will be 
+     evaluated from left to right, and the first valid mapping will be used.
+   </description>
+ </property>
+```
+
 ###Other Properties
 
   * Resource Calculator
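
For illustration, a minimal sketch of setting the two queue-mapping properties programmatically; the property names and the [u or g]:[name]:[queue_name][,next_mapping]* syntax come from the documentation added above, while the user, group and queue names are made up for the example.

    import org.apache.hadoop.conf.Configuration;

    // Illustrative only: mirrors the XML example in the new documentation.
    public class QueueMappingConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // user1 -> queue1, members of group1 -> queue2,
        // every other user -> a queue named after the submitting user.
        conf.set("yarn.scheduler.capacity.queue-mappings",
            "u:user1:queue1,g:group1:queue2,u:%user:%user");

        // Let these mappings override a queue named explicitly at submit time.
        conf.setBoolean("yarn.scheduler.capacity.queue-mappings-override.enable", true);

        System.out.println(conf.get("yarn.scheduler.capacity.queue-mappings"));
      }
    }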


[28/49] hadoop git commit: HADOOP-10115. Exclude duplicate jars in hadoop package under different component's lib (Vinayakumar B via aw)

Posted by zj...@apache.org.
HADOOP-10115. Exclude duplicate jars in hadoop package under different component's lib (Vinayakumar B via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47f7f18d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47f7f18d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47f7f18d

Branch: refs/heads/YARN-2928
Commit: 47f7f18d4cc9145607ef3dfb70aa88748cd9dbec
Parents: 54639c7
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon Mar 9 21:44:06 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Mon Mar 9 21:44:06 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 hadoop-dist/pom.xml                             | 89 +++++++++++++++++---
 2 files changed, 78 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f7f18d/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fa73ba1..f831d1a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -187,6 +187,9 @@ Trunk (Unreleased)
     HADOOP-11673. Skip using JUnit Assume in TestCodec. (Brahma Reddy Battula
     via cdouglas)
 
+    HADOOP-10115. Exclude duplicate jars in hadoop package under different
+    component's lib (Vinayakumar B via aw)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f7f18d/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 0c82332..f894c01 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -107,25 +107,86 @@
                         fi
                       }
 
-                      ROOT=`cd ../..;pwd`
+                      findFileInDir(){
+                        local file="$1";
+                        local dir="${2:-./share}";
+                        local count=$(find "$dir" -iname "$file"|wc -l)
+                        echo "$count";
+                      }
+
+                      copyIfNotExists(){
+                        local src="$1"
+                        local srcName=$(basename "$src")
+                        local dest="$2";
+                        if [ -f "$src" ]; then
+                          if [[ "$srcName" != *.jar ]] || [ $(findFileInDir "$srcName") -eq "0" ]; then
+                            local destDir=$(dirname "$dest")
+                            mkdir -p "$destDir"
+                            cp "$src" "$dest"
+                          fi
+                        else
+                          for childPath in "$src"/* ;
+                          do
+                            child=$(basename "$childPath");
+                            if [ "$child" == "doc" ] || [ "$child" == "webapps" ]; then
+                              mkdir -p "$dest"/"$child"
+                              cp -r "$src"/"$child"/* "$dest"/"$child"
+                              continue;
+                            fi
+                            copyIfNotExists "$src"/"$child" "$dest"/"$child"
+                          done
+                        fi
+                      }
+
+                      #Copy all contents as is except the lib.
+                      #for libs check for existence in share directory, if not exist then only copy.
+                      copy(){
+                        local src="$1";
+                        local dest="$2";
+                        if [ -d "$src" ]; then
+                          for childPath in "$src"/* ;
+                          do
+                            child=$(basename "$childPath");
+                            if [ "$child" == "share" ]; then
+                              copyIfNotExists "$src"/"$child" "$dest"/"$child"
+                            else
+                              if [ -d "$src"/"$child" ]; then
+                                mkdir -p "$dest"/"$child"
+                                cp -r "$src"/"$child"/* "$dest"/"$child"
+                              else
+                                cp -r "$src"/"$child" "$dest"/"$child"
+                              fi
+                            fi
+                          done
+                        fi
+                      }
+
+                      # Shellcheck SC2086
+                      ROOT=$(cd "${project.build.directory}"/../..;pwd)
                       echo
-                      echo "Current directory `pwd`"
+                      echo "Current directory $(pwd)"
                       echo
                       run rm -rf hadoop-${project.version}
                       run mkdir hadoop-${project.version}
                       run cd hadoop-${project.version}
-                      run cp $ROOT/LICENSE.txt .
-                      run cp $ROOT/NOTICE.txt .
-                      run cp $ROOT/README.txt .
-                      run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
-                      run cp -r $ROOT/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${project.version}/* .
-                      run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
-                      run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
-                      run cp -r $ROOT/hadoop-common-project/hadoop-kms/target/hadoop-kms-${project.version}/* .
-                      run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${project.version}/* .
-                      run cp -r $ROOT/hadoop-yarn-project/target/hadoop-yarn-project-${project.version}/* .
-                      run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
-                      run cp -r $ROOT/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version}/* .
+                      run cp "$ROOT"/LICENSE.txt .
+                      run cp "$ROOT"/NOTICE.txt .
+                      run cp "$ROOT"/README.txt .
+
+                      # Copy hadoop-common first so that it have always have all dependencies.
+                      # Remaining projects will copy only libraries which are not present already in 'share' directory.
+                      run copy "$ROOT"/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version} .
+                      run copy "$ROOT"/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${project.version} .
+                      run copy "$ROOT"/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version} .
+                      run copy "$ROOT"/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${project.version} .
+                      run copy "$ROOT"/hadoop-yarn-project/target/hadoop-yarn-project-${project.version} .
+                      run copy "$ROOT"/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version} .
+                      run copy "$ROOT"/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version} .
+
+                      #copy httpfs and kms as is
+                      run cp -r "$ROOT"/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
+                      run cp -r "$ROOT"/hadoop-common-project/hadoop-kms/target/hadoop-kms-${project.version}/* .
+
                       echo
                       echo "Hadoop dist layout available at: ${project.build.directory}/hadoop-${project.version}"
                       echo


[44/49] hadoop git commit: YARN-3338. Exclude jline dependency from YARN. Contributed by Zhijie Shen

Posted by zj...@apache.org.
YARN-3338. Exclude jline dependency from YARN. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06ce1d9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06ce1d9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06ce1d9a

Branch: refs/heads/YARN-2928
Commit: 06ce1d9a6cd9bec25e2f478b98264caf96a3ea44
Parents: ff83ae7
Author: Xuan <xg...@apache.org>
Authored: Thu Mar 12 10:25:00 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Thu Mar 12 10:25:00 2015 -0700

----------------------------------------------------------------------
 hadoop-project/pom.xml          | 4 ++++
 hadoop-yarn-project/CHANGES.txt | 2 ++
 2 files changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06ce1d9a/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a6127c7..6c95cf0 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -833,6 +833,10 @@
             <groupId>org.jboss.netty</groupId>
             <artifactId>netty</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>jline</groupId>
+            <artifactId>jline</artifactId>
+          </exclusion>
         </exclusions>
       </dependency>
       <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06ce1d9a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 969c6a1..11d1cc9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -755,6 +755,8 @@ Release 2.7.0 - UNRELEASED
     YARN-1884. Added nodeHttpAddress into ContainerReport and fixed the link to NM
     web page. (Xuan Gong via zjshen)
 
+    YARN-3338. Exclude jline dependency from YARN. (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES


[23/49] hadoop git commit: HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P. McCabe)

Posted by zj...@apache.org.
HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3241fc2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3241fc2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3241fc2b

Branch: refs/heads/YARN-2928
Commit: 3241fc2b17f11e621d8ffb6160caa4b850c278b6
Parents: de1101c
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Mar 9 12:56:33 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Mar 9 12:56:33 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt     |  4 ++++
 .../hadoop/crypto/random/OpensslSecureRandom.c      | 16 +++++++++++++++-
 2 files changed, 19 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3241fc2b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d5a8463..0fe5b7c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -671,6 +671,10 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0.
     (Shashank Khandelwal and Ivan Mitic via cnauroth)
 
+    HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support
+    FreeBSD and Solaris in addition to Linux (Kiran Kumar M R via Colin P.
+    McCabe)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3241fc2b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
index 6c31d10..f30ccbe 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
@@ -29,6 +29,10 @@
 #include <sys/types.h>
 #endif
 
+#if defined(__FreeBSD__)
+#include <pthread_np.h>
+#endif
+
 #ifdef WINDOWS
 #include <windows.h>
 #endif
@@ -274,7 +278,17 @@ static void pthreads_locking_callback(int mode, int type, char *file, int line)
 
 static unsigned long pthreads_thread_id(void)
 {
-  return (unsigned long)syscall(SYS_gettid);
+  unsigned long thread_id = 0;
+#if defined(__linux__)
+  thread_id = (unsigned long)syscall(SYS_gettid);
+#elif defined(__FreeBSD__)
+  thread_id = (unsigned long)pthread_getthreadid_np();
+#elif defined(__sun)
+  thread_id = (unsigned long)pthread_self();
+#else
+#error "Platform not supported"
+#endif
+  return thread_id;
 }
 
 #endif /* UNIX */


[20/49] hadoop git commit: HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz Wo Nicholas Sze.

Posted by zj...@apache.org.
HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e43882e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e43882e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e43882e8

Branch: refs/heads/YARN-2928
Commit: e43882e84ae44301eabd0122b5e5492da5fe9f66
Parents: 5578e22
Author: Jing Zhao <ji...@apache.org>
Authored: Mon Mar 9 10:52:17 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 10:52:17 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 61 +++++++++++++-------
 .../hdfs/server/namenode/TestFileTruncate.java  | 11 +++-
 3 files changed, 51 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e43882e8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e106b1a..094abfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -734,6 +734,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7411. Change decommission logic to throttle by blocks rather than
     nodes in each interval. (Andrew Wang via cdouglas)
 
+    HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
+    (Tsz Wo Nicholas Sze via jing9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e43882e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index 5c4c7b4..e80e14f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -69,6 +65,9 @@ public class TestAppendSnapshotTruncate {
   private static final int BLOCK_SIZE = 1024;
   private static final int DATANODE_NUM = 3;
   private static final short REPLICATION = 3;
+  private static final int FILE_WORKER_NUM = 3;
+  private static final long TEST_TIME_SECOND = 10;
+  private static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;
 
   static final int SHORT_HEARTBEAT = 1;
   static final String[] EMPTY_STRINGS = {};
@@ -106,7 +105,7 @@ public class TestAppendSnapshotTruncate {
 
 
   /** Test randomly mixing append, snapshot and truncate operations. */
-  @Test
+  @Test(timeout=TEST_TIMEOUT_SECOND*1000)
   public void testAST() throws Exception {
     final String dirPathString = "/dir";
     final Path dir = new Path(dirPathString);
@@ -121,12 +120,12 @@ public class TestAppendSnapshotTruncate {
     }
     localDir.mkdirs();
 
-    final DirWorker w = new DirWorker(dir, localDir, 3);
+    final DirWorker w = new DirWorker(dir, localDir, FILE_WORKER_NUM);
     w.startAllFiles();
     w.start();
-    Worker.sleep(10L*1000);
+    Worker.sleep(TEST_TIME_SECOND * 1000);
     w.stop();
-    w.stoptAllFiles();
+    w.stopAllFiles();
     w.checkEverything();
   }
 
@@ -259,7 +258,7 @@ public class TestAppendSnapshotTruncate {
       }
     }
     
-    void stoptAllFiles() throws InterruptedException {
+    void stopAllFiles() throws InterruptedException {
       for(FileWorker f : files) { 
         f.stop();
       }
@@ -269,12 +268,12 @@ public class TestAppendSnapshotTruncate {
       LOG.info("checkEverything");
       for(FileWorker f : files) { 
         f.checkFullFile();
-        Preconditions.checkState(f.state.get() != State.ERROR);
+        f.checkErrorState();
       }
       for(String snapshot : snapshotPaths.keySet()) {
         checkSnapshot(snapshot);
       }
-      Preconditions.checkState(state.get() != State.ERROR);
+      checkErrorState();
     }
   }
 
@@ -364,7 +363,7 @@ public class TestAppendSnapshotTruncate {
       b.append(", newLength=").append(newLength)
        .append(", isReady=").append(isReady);
       if (!isReady) {
-        TestFileTruncate.checkBlockRecovery(file, dfs);
+        TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
       }
       return isReady;
     }
@@ -407,6 +406,7 @@ public class TestAppendSnapshotTruncate {
       IDLE(false), RUNNING(false), STOPPED(true), ERROR(true);
       
       final boolean isTerminated;
+
       State(boolean isTerminated) {
         this.isTerminated = isTerminated;
       }
@@ -416,11 +416,29 @@ public class TestAppendSnapshotTruncate {
     final AtomicReference<State> state = new AtomicReference<State>(State.IDLE);
     final AtomicBoolean isCalling = new AtomicBoolean();
     final AtomicReference<Thread> thread = new AtomicReference<Thread>();
-    
+
+    private Throwable thrown = null;
+
     Worker(String name) {
       this.name = name;
     }
 
+    State checkErrorState() {
+      final State s = state.get();
+      if (s == State.ERROR) {
+        throw new IllegalStateException(name + " has " + s, thrown);
+      }
+      return s;
+    }
+
+    void setErrorState(Throwable t) {
+      checkErrorState();
+
+      LOG.error("Worker " + name + " failed.", t);
+      state.set(State.ERROR);
+      thrown = t;
+    }
+
     void start() {
       Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING));
       
@@ -429,14 +447,13 @@ public class TestAppendSnapshotTruncate {
           @Override
           public void run() {
             final Random r = DFSUtil.getRandom();
-            for(State s; (s = state.get()) == State.RUNNING || s == State.IDLE;) {
+            for(State s; !(s = checkErrorState()).isTerminated;) {
               if (s == State.RUNNING) {
                 isCalling.set(true);
                 try {
                   LOG.info(call());
-                } catch (Exception e) {
-                  LOG.error("Worker " + name + " failed.", e);
-                  state.set(State.ERROR);
+                } catch(Throwable t) {
+                  setErrorState(t);
                   return;
                 }
                 isCalling.set(false);
@@ -451,7 +468,11 @@ public class TestAppendSnapshotTruncate {
     }
 
     boolean isPaused() {
-      return state.get() == State.IDLE && !isCalling.get();
+      final State s = checkErrorState();
+      if (s == State.STOPPED) {
+        throw new IllegalStateException(name + " is " + s);
+      }
+      return s == State.IDLE && !isCalling.get();
     }
 
     void pause() {
@@ -459,9 +480,7 @@ public class TestAppendSnapshotTruncate {
     }
 
     void stop() throws InterruptedException {
-      if (state.get() == State.ERROR) {
-        return;
-      }
+      checkErrorState();
 
       state.set(State.STOPPED);
       thread.get().join();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e43882e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 19b5cde..b69d345 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -1151,8 +1151,13 @@ public class TestFileTruncate {
 
   public static void checkBlockRecovery(Path p, DistributedFileSystem dfs)
       throws IOException {
+    checkBlockRecovery(p, dfs, SUCCESS_ATTEMPTS, SLEEP);
+  }
+
+  public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
+      int attempts, long sleepMs) throws IOException {
     boolean success = false;
-    for(int i = 0; i < SUCCESS_ATTEMPTS; i++) {
+    for(int i = 0; i < attempts; i++) {
       LocatedBlocks blocks = getLocatedBlocks(p, dfs);
       boolean noLastBlock = blocks.getLastLocatedBlock() == null;
       if(!blocks.isUnderConstruction() &&
@@ -1160,9 +1165,9 @@ public class TestFileTruncate {
         success = true;
         break;
       }
-      try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {}
+      try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
     }
-    assertThat("inode should complete in ~" + SLEEP * SUCCESS_ATTEMPTS + " ms.",
+    assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
         success, is(true));
   }
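
The parameterized overload above turns a hard-coded wait into a bounded poll
(the caller in TestAppendSnapshotTruncate passes 100 attempts of 300 ms). As a
generic restatement of that shape (Poll and its names are illustrative, not
part of the patch):

    import java.util.function.BooleanSupplier;

    // Bounded poll: retry a condition a fixed number of times with a fixed
    // sleep, then fail, as the parameterized checkBlockRecovery now does.
    final class Poll {
      static void until(BooleanSupplier condition, int attempts, long sleepMs) {
        for (int i = 0; i < attempts; i++) {
          if (condition.getAsBoolean()) {
            return;
          }
          try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
        }
        throw new AssertionError(
            "condition not met within ~" + sleepMs * attempts + " ms");
      }
    }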
 


[04/49] hadoop git commit: HDFS-7885. Datanode should not trust the generation stamp provided by client. Contributed by Tsz Wo Nicholas Sze.

Posted by zj...@apache.org.
HDFS-7885. Datanode should not trust the generation stamp provided by client. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24db0812
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24db0812
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24db0812

Branch: refs/heads/YARN-2928
Commit: 24db0812be64e83a48ade01fc1eaaeaedad4dec0
Parents: 95bfd08
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Mar 6 10:55:56 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri Mar 6 10:55:56 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 15 +++++
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java | 63 ++++++++++++++++++++
 3 files changed, 81 insertions(+)
----------------------------------------------------------------------
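
The substance of the fix is in FsDatasetImpl.getBlockLocalPathInfo (diff
below): the datanode looks up its own replica and reconciles generation stamps
before serving the local-read path. A client stamp newer than the replica's is
rejected; a stale client stamp is replaced with the replica's. A condensed,
illustrative sketch of that rule (GenStampCheck is a stand-in, not part of the
patch):

    import java.io.IOException;

    // Never trust the generation stamp the client sent: compare it with the
    // datanode's own replica and use the replica's value as authoritative.
    final class GenStampCheck {
      static long reconcile(long replicaGenStamp, long clientGenStamp)
          throws IOException {
        if (replicaGenStamp < clientGenStamp) {
          // The client claims a newer stamp than the datanode has: reject.
          throw new IOException("Replica generation stamp < block generation"
              + " stamp, replica=" + replicaGenStamp
              + ", block=" + clientGenStamp);
        }
        // Otherwise (e.g. an append bumped the stamp after the client cached
        // its block), fall back to the datanode's stamp.
        return replicaGenStamp;
      }
    }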


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24db0812/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 763d327..e622a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1104,6 +1104,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
 
+    HDFS-7885. Datanode should not trust the generation stamp provided by
+    client. (Tsz Wo Nicholas Sze via jing9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24db0812/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index cc6220a..58f5615 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2568,6 +2568,21 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
       throws IOException {
+    synchronized(this) {
+      final Replica replica = volumeMap.get(block.getBlockPoolId(),
+          block.getBlockId());
+      if (replica == null) {
+        throw new ReplicaNotFoundException(block);
+      }
+      if (replica.getGenerationStamp() < block.getGenerationStamp()) {
+        throw new IOException(
+            "Replica generation stamp < block generation stamp, block="
+            + block + ", replica=" + replica);
+      } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+        block.setGenerationStamp(replica.getGenerationStamp());
+      }
+    }
+
     File datafile = getBlockFile(block);
     File metafile = FsDatasetUtil.getMetaFile(datafile, block.getGenerationStamp());
     BlockLocalPathInfo info = new BlockLocalPathInfo(block,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24db0812/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index cb50539..1c4134f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -30,11 +30,16 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.BeforeClass;
@@ -153,4 +158,62 @@ public class TestBlockReaderLocalLegacy {
     Arrays.equals(orig, buf);
     cluster.shutdown();
   }
+
+  @Test(timeout=20000)
+  public void testBlockReaderLocalLegacyWithAppend() throws Exception {
+    final short REPL_FACTOR = 1;
+    final HdfsConfiguration conf = getConfiguration(null);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    final Path path = new Path("/testBlockReaderLocalLegacy");
+    DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0);
+    DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
+
+    final ClientDatanodeProtocol proxy;
+    final Token<BlockTokenIdentifier> token;
+    final ExtendedBlock originalBlock;
+    final long originalGS;
+    {
+      final LocatedBlock lb = cluster.getNameNode().getRpcServer()
+          .getBlockLocations(path.toString(), 0, 1).get(0);
+      proxy = DFSUtil.createClientDatanodeProtocolProxy(
+          lb.getLocations()[0], conf, 60000, false);
+      token = lb.getBlockToken();
+
+      // get block and generation stamp
+      final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
+      originalBlock = new ExtendedBlock(blk);
+      originalGS = originalBlock.getGenerationStamp();
+
+      // test getBlockLocalPathInfo
+      final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
+      Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
+    }
+
+    { // append one byte
+      FSDataOutputStream out = dfs.append(path);
+      out.write(1);
+      out.close();
+    }
+
+    {
+      // get new generation stamp
+      final LocatedBlock lb = cluster.getNameNode().getRpcServer()
+          .getBlockLocations(path.toString(), 0, 1).get(0);
+      final long newGS = lb.getBlock().getGenerationStamp();
+      Assert.assertTrue(newGS > originalGS);
+
+      // getBlockLocalPathInfo using the original block.
+      Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
+      final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(
+          originalBlock, token);
+      Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
+    }
+    cluster.shutdown();
+  }
 }


[49/49] hadoop git commit: Merge remote-tracking branch 'apache/trunk' into YARN-2928

Posted by zj...@apache.org.
Merge remote-tracking branch 'apache/trunk' into YARN-2928

Conflicts:
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb1b5960
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb1b5960
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb1b5960

Branch: refs/heads/YARN-2928
Commit: fb1b596006c7b59b6b72f3974e37e67f414f5667
Parents: 821b68d 8212877
Author: Zhijie Shen <zj...@apache.org>
Authored: Thu Mar 12 15:19:47 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 12 15:38:09 2015 -0700

----------------------------------------------------------------------
 .gitignore                                      |   1 +
 BUILDING.txt                                    |   9 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |  53 ++
 .../src/main/bin/hadoop-daemons.sh              |  13 +-
 .../src/main/bin/hadoop-functions.sh            |  18 +-
 .../src/main/conf/shellprofile.d/example        | 106 ----
 .../src/main/conf/shellprofile.d/example.sh     | 106 ++++
 .../fs/CommonConfigurationKeysPublic.java       |   6 +-
 .../apache/hadoop/fs/DelegateToFileSystem.java  |   3 +-
 .../io/compress/bzip2/Bzip2Compressor.java      |   3 -
 .../io/compress/bzip2/Bzip2Decompressor.java    |   3 -
 .../hadoop/io/compress/lz4/Lz4Compressor.java   |   4 -
 .../hadoop/io/compress/lz4/Lz4Decompressor.java |   4 -
 .../io/compress/snappy/SnappyCompressor.java    |   4 -
 .../io/compress/snappy/SnappyDecompressor.java  |   4 -
 .../hadoop/io/compress/zlib/ZlibCompressor.java |   3 -
 .../io/compress/zlib/ZlibDecompressor.java      |   5 +-
 .../main/java/org/apache/hadoop/ipc/Client.java |  33 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   9 +-
 .../hadoop/security/LdapGroupsMapping.java      |  54 +-
 .../hadoop/io/compress/bzip2/Bzip2Compressor.c  |   9 +-
 .../io/compress/bzip2/Bzip2Decompressor.c       |   7 -
 .../hadoop/io/compress/lz4/Lz4Compressor.c      |  13 -
 .../hadoop/io/compress/lz4/Lz4Decompressor.c    |   8 -
 .../io/compress/snappy/SnappyCompressor.c       |   8 -
 .../io/compress/snappy/SnappyDecompressor.c     |   8 -
 .../hadoop/io/compress/zlib/ZlibCompressor.c    |  11 -
 .../hadoop/io/compress/zlib/ZlibDecompressor.c  |  10 -
 .../src/main/resources/core-default.xml         |  20 +
 .../hadoop-common/src/main/winutils/task.c      | 144 ++++-
 .../src/main/winutils/win8sdk.props             |  28 +
 .../src/main/winutils/winutils.vcxproj          |   3 +
 .../hadoop-common/src/site/markdown/Metrics.md  |   2 +
 .../hadoop/fs/TestDelegateToFileSystem.java     |  52 ++
 .../apache/hadoop/io/compress/TestCodec.java    |  16 +-
 .../bzip2/TestBzip2CompressorDecompressor.java  | 104 ++++
 .../lz4/TestLz4CompressorDecompressor.java      |  17 +
 .../TestSnappyCompressorDecompressor.java       |  17 +
 .../zlib/TestZlibCompressorDecompressor.java    |  17 +
 .../hadoop/security/TestLdapGroupsMapping.java  |  42 +-
 .../security/TestLdapGroupsMappingBase.java     |  77 +++
 .../TestLdapGroupsMappingWithPosixGroup.java    | 103 +++
 .../org/apache/hadoop/util/TestWinUtils.java    |  62 ++
 .../java/org/apache/hadoop/minikdc/MiniKdc.java |  17 +-
 .../minikdc/TestChangeOrgNameAndDomain.java     |  32 +
 hadoop-dist/pom.xml                             |  89 ++-
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java     |  14 +
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    |  12 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  41 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../apache/hadoop/hdfs/HdfsConfiguration.java   |   2 +-
 .../server/blockmanagement/BlockManager.java    | 121 +---
 .../server/blockmanagement/DatanodeManager.java | 109 +---
 .../blockmanagement/DecommissionManager.java    | 619 +++++++++++++++++--
 .../hadoop/hdfs/server/common/Storage.java      |   2 +-
 .../hdfs/server/datanode/BPServiceActor.java    |   2 +
 .../hadoop/hdfs/server/datanode/DataNode.java   | 109 +++-
 .../hdfs/server/datanode/DataStorage.java       |  15 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |  20 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |  16 +-
 .../impl/FsDatasetAsyncDiskService.java         |  31 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 153 +++--
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   3 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |  41 +-
 .../datanode/metrics/DataNodeMetrics.java       |   5 +
 .../datanode/web/webhdfs/ParameterParser.java   |   2 +-
 .../hadoop/hdfs/web/resources/OffsetParam.java  |   5 +
 .../src/main/resources/hdfs-default.xml         |  23 +-
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs    |  36 --
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs.sh |  36 ++
 .../src/site/markdown/HdfsNfsGateway.md         |  44 +-
 .../src/site/xdoc/HdfsRollingUpgrade.xml        |   4 +-
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java |  61 +-
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java |  63 ++
 .../apache/hadoop/hdfs/TestDecommission.java    | 412 ++++++++----
 .../blockmanagement/BlockManagerTestUtil.java   |   8 +-
 .../TestReplicationPolicyConsiderLoad.java      |   2 +-
 .../server/datanode/SimulatedFSDataset.java     |  11 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |  99 ++-
 .../server/datanode/TestDataNodeMetrics.java    |   2 +
 .../datanode/TestDataNodeVolumeFailure.java     |  70 +++
 .../TestDataNodeVolumeFailureReporting.java     |  36 +-
 .../extdataset/ExternalDatasetImpl.java         |  12 +-
 .../fsdataset/impl/FsDatasetTestUtil.java       |  43 ++
 .../fsdataset/impl/TestFsDatasetImpl.java       | 118 +++-
 .../web/webhdfs/TestParameterParser.java        |  19 +
 .../namenode/TestDecommissioningStatus.java     |  59 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  11 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   2 +-
 .../namenode/TestNamenodeCapacityReport.java    |   4 +-
 hadoop-mapreduce-project/CHANGES.txt            |  26 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java    |   5 +
 .../lib/output/FileOutputCommitter.java         | 119 ++--
 .../src/main/resources/mapred-default.xml       |  62 ++
 .../hadoop/mapred/TestFileOutputCommitter.java  | 134 +++-
 .../lib/output/TestFileOutputCommitter.java     | 116 +++-
 .../org/apache/hadoop/mapred/YARNRunner.java    |   5 +-
 .../java/org/apache/hadoop/hdfs/NNBench.java    |   4 +-
 .../apache/hadoop/mapred/TestYARNRunner.java    |  26 +
 .../shellprofile.d/mapreduce                    |  41 --
 .../shellprofile.d/mapreduce.sh                 |  41 ++
 hadoop-project/pom.xml                          |  10 +-
 .../org/apache/hadoop/fs/s3a/Constants.java     |   6 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  20 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  10 +-
 hadoop-tools/hadoop-azure/pom.xml               |   7 +-
 .../fs/azure/AzureNativeFileSystemStore.java    |  92 ++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  10 +-
 .../hadoop/fs/azure/PageBlobFormatHelpers.java  |   2 +-
 .../hadoop/fs/azure/PageBlobInputStream.java    |   8 +-
 .../hadoop/fs/azure/PageBlobOutputStream.java   |   8 +-
 .../hadoop/fs/azure/SelfRenewingLease.java      |   6 +-
 .../fs/azure/SelfThrottlingIntercept.java       |  10 +-
 .../hadoop/fs/azure/SendRequestIntercept.java   |  16 +-
 .../hadoop/fs/azure/StorageInterface.java       |  30 +-
 .../hadoop/fs/azure/StorageInterfaceImpl.java   |  50 +-
 .../fs/azure/metrics/ErrorMetricUpdater.java    |   8 +-
 .../metrics/ResponseReceivedMetricUpdater.java  |  10 +-
 .../fs/azure/AzureBlobStorageTestAccount.java   |  28 +-
 .../hadoop/fs/azure/MockStorageInterface.java   |  72 ++-
 .../fs/azure/NativeAzureFileSystemBaseTest.java |   6 +-
 .../TestAzureFileSystemErrorConditions.java     |   6 +-
 .../hadoop/fs/azure/TestBlobDataValidation.java |  20 +-
 .../hadoop/fs/azure/TestContainerChecks.java    |   6 +-
 .../TestOutOfBandAzureBlobOperationsLive.java   |   4 +-
 .../fs/azure/TestWasbUriAndConfiguration.java   |   4 +-
 hadoop-yarn-project/CHANGES.txt                 |  50 ++
 .../hadoop-yarn/bin/yarn-daemons.sh             |  13 +-
 .../dev-support/findbugs-exclude.xml            |   5 +-
 .../yarn/api/ApplicationBaseProtocol.java       | 355 +++++++++++
 .../yarn/api/ApplicationClientProtocol.java     | 290 +--------
 .../yarn/api/ApplicationHistoryProtocol.java    | 303 +--------
 .../yarn/api/records/ContainerReport.java       |  17 +-
 .../yarn/api/records/LogAggregationContext.java |  79 ++-
 .../hadoop/yarn/conf/YarnConfiguration.java     |  12 +
 .../src/main/proto/yarn_protos.proto            |   3 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  11 +-
 .../hadoop/yarn/client/ProtocolHATestBase.java  |   4 +-
 .../yarn/client/api/impl/TestAHSClient.java     |   6 +-
 .../yarn/client/api/impl/TestYarnClient.java    |  16 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java     |  18 +-
 .../records/impl/pb/ContainerReportPBImpl.java  |  19 +
 .../impl/pb/LogAggregationContextPBImpl.java    |  39 ++
 .../client/api/impl/TimelineClientImpl.java     |  80 +--
 .../logaggregation/AggregatedLogFormat.java     |  39 +-
 .../util/ResourceCalculatorProcessTree.java     |  25 +-
 .../hadoop/yarn/util/resource/Resources.java    |   5 +
 .../apache/hadoop/yarn/webapp/ResponseInfo.java |   6 +-
 .../hadoop/yarn/webapp/YarnWebParams.java       |   4 +
 .../hadoop/yarn/webapp/view/HtmlBlock.java      |   2 +
 .../src/main/resources/yarn-default.xml         |  14 +
 .../ApplicationHistoryClientService.java        | 185 +++---
 .../ApplicationHistoryManager.java              | 126 +++-
 .../ApplicationHistoryManagerImpl.java          |   2 +-
 ...pplicationHistoryManagerOnTimelineStore.java |  10 +-
 .../ApplicationHistoryServer.java               |   2 +-
 .../webapp/AHSView.java                         |  28 +-
 .../webapp/AHSWebApp.java                       |  16 +-
 .../webapp/AHSWebServices.java                  |   6 +-
 .../webapp/AppAttemptPage.java                  |  17 +-
 .../webapp/AppPage.java                         |  21 +-
 .../TestApplicationHistoryClientService.java    |  12 +-
 .../webapp/TestAHSWebApp.java                   |  27 +-
 .../webapp/TestAHSWebServices.java              |  26 +-
 .../TestTimelineAuthenticationFilter.java       | 221 +++----
 .../yarn/server/api/ApplicationContext.java     | 122 ----
 .../metrics/ContainerMetricsConstants.java      |   2 +
 .../yarn/server/webapp/AppAttemptBlock.java     | 172 ++++--
 .../hadoop/yarn/server/webapp/AppBlock.java     | 247 ++++++--
 .../hadoop/yarn/server/webapp/AppsBlock.java    |  53 +-
 .../yarn/server/webapp/ContainerBlock.java      |  36 +-
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  86 +++
 .../hadoop/yarn/server/webapp/WebServices.java  |  68 +-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  11 +-
 .../yarn/server/webapp/dao/ContainerInfo.java   |   5 +
 .../server/nodemanager/ContainerExecutor.java   |  49 +-
 .../nodemanager/DefaultContainerExecutor.java   |   9 +-
 .../WindowsSecureContainerExecutor.java         |   9 +-
 .../logaggregation/AppLogAggregatorImpl.java    |  22 +-
 .../nodemanager/TestContainerExecutor.java      |  53 ++
 .../TestContainerManagerRecovery.java           |  10 +-
 .../TestLogAggregationService.java              |  73 ++-
 .../metrics/ContainerCreatedEvent.java          |   8 +-
 .../metrics/SystemMetricsPublisher.java         |   5 +-
 .../ProportionalCapacityPreemptionPolicy.java   |  27 +-
 .../rmcontainer/RMContainer.java                |   1 +
 .../rmcontainer/RMContainerImpl.java            |  22 +-
 .../resourcemanager/webapp/AppAttemptPage.java  |  57 ++
 .../server/resourcemanager/webapp/AppBlock.java | 344 -----------
 .../server/resourcemanager/webapp/AppPage.java  |  25 +-
 .../resourcemanager/webapp/AppsBlock.java       | 132 ----
 .../webapp/AppsBlockWithMetrics.java            |   1 +
 .../webapp/CapacitySchedulerPage.java           |   1 +
 .../resourcemanager/webapp/ContainerPage.java   |  44 ++
 .../webapp/DefaultSchedulerPage.java            |   1 +
 .../webapp/FairSchedulerPage.java               |  21 +-
 .../server/resourcemanager/webapp/RMWebApp.java |   5 +
 .../resourcemanager/webapp/RmController.java    |   8 +
 .../server/resourcemanager/webapp/RmView.java   |  31 +-
 .../resourcemanager/webapp/dao/NodesInfo.java   |   3 +
 .../webapp/dao/SchedulerTypeInfo.java           |   3 +
 .../metrics/TestSystemMetricsPublisher.java     |   5 +-
 ...estProportionalCapacityPreemptionPolicy.java |  24 +
 .../capacity/TestContainerAllocation.java       |   8 +-
 .../resourcemanager/webapp/TestAppPage.java     |   8 +-
 .../resourcemanager/webapp/TestRMWebApp.java    |  48 +-
 .../webapp/TestRMWebAppFairScheduler.java       |  14 +-
 .../src/site/markdown/CapacityScheduler.md      |  26 +
 .../src/site/markdown/ResourceManagerRestart.md |   6 +-
 .../src/site/markdown/YarnCommands.md           |  28 +-
 .../hadoop-yarn/shellprofile.d/yarn             |  62 --
 .../hadoop-yarn/shellprofile.d/yarn.sh          |  62 ++
 212 files changed, 5895 insertions(+), 3164 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb1b5960/hadoop-project/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb1b5960/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb1b5960/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb1b5960/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
----------------------------------------------------------------------
diff --cc hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index d5faaac,df6c7a4..22dcc00
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@@ -112,6 -108,9 +113,8 @@@ public class TimelineClientImpl extend
    private ConnectionConfigurator connConfigurator;
    private DelegationTokenAuthenticator authenticator;
    private DelegationTokenAuthenticatedURL.Token token;
 -  private URI resURI;
+   private UserGroupInformation authUgi;
+   private String doAsUser;
  
    @Private
    @VisibleForTesting
@@@ -339,46 -313,20 +351,50 @@@
      doPosting(domain, "domain");
    }
  
 +  private void putObjects(
 +      URI base, String path, MultivaluedMap<String, String> params, Object obj)
 +          throws IOException, YarnException {
 +    ClientResponse resp;
 +    try {
 +      resp = client.resource(base).path(path).queryParams(params)
 +          .accept(MediaType.APPLICATION_JSON)
 +          .type(MediaType.APPLICATION_JSON)
 +          .put(ClientResponse.class, obj);
 +    } catch (RuntimeException re) {
 +      // runtime exception is expected if the client cannot connect the server
 +      String msg =
 +          "Failed to get the response from the timeline server.";
 +      LOG.error(msg, re);
 +      throw new IOException(re);
 +    }
 +    if (resp == null ||
 +        resp.getClientResponseStatus() != ClientResponse.Status.OK) {
 +      String msg =
 +          "Failed to get the response from the timeline server.";
 +      LOG.error(msg);
 +      if (LOG.isDebugEnabled() && resp != null) {
 +        String output = resp.getEntity(String.class);
 +        LOG.debug("HTTP error code: " + resp.getStatus()
 +            + " Server response:\n" + output);
 +      }
 +      throw new YarnException(msg);
 +    }
 +  }
 +
-   private ClientResponse doPosting(Object obj, String path) throws IOException, YarnException {
+   private ClientResponse doPosting(final Object obj, final String path)
+       throws IOException, YarnException {
      ClientResponse resp;
      try {
-       resp = doPostingObject(obj, path);
-     } catch (RuntimeException re) {
-       // runtime exception is expected if the client cannot connect the server
-       String msg =
-           "Failed to get the response from the timeline server.";
-       LOG.error(msg, re);
-       throw re;
+       resp = authUgi.doAs(new PrivilegedExceptionAction<ClientResponse>() {
+         @Override
+         public ClientResponse run() throws Exception {
+           return doPostingObject(obj, path);
+         }
+       });
+     } catch (UndeclaredThrowableException e) {
+         throw new IOException(e.getCause());
+     } catch (InterruptedException ie) {
+       throw new IOException(ie);
      }
      if (resp == null ||
          resp.getClientResponseStatus() != ClientResponse.Status.OK) {
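
The conflict resolution above wraps the HTTP call in UserGroupInformation.doAs
so the request runs as the authenticated user, and maps the
UndeclaredThrowableException that doAs uses for unexpected checked exceptions
back to an IOException. A minimal sketch of that wrapping (DoAsHelper is
illustrative; the real code lives in TimelineClientImpl.doPosting):

    import java.io.IOException;
    import java.lang.reflect.UndeclaredThrowableException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    // Run an action as the given UGI and normalise the exceptions doAs can
    // surface into plain IOExceptions, as the resolved doPosting does.
    final class DoAsHelper {
      static <T> T runAs(UserGroupInformation ugi,
          PrivilegedExceptionAction<T> action) throws IOException {
        try {
          return ugi.doAs(action);
        } catch (UndeclaredThrowableException e) {
          // doAs wraps checked exceptions it cannot rethrow directly.
          throw new IOException(e.getCause());
        } catch (InterruptedException ie) {
          throw new IOException(ie);
        }
      }
    }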


[46/49] hadoop git commit: HDFS-7722. DataNode#checkDiskError should also remove Storage when error is found. (Lei Xu via Colin P. McCabe)

Posted by zj...@apache.org.
HDFS-7722. DataNode#checkDiskError should also remove Storage when error is found. (Lei Xu via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b49c3a18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b49c3a18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b49c3a18

Branch: refs/heads/YARN-2928
Commit: b49c3a1813aa8c5b05fe6c02a653286c573137ca
Parents: 6dae6d1
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Thu Mar 12 12:00:18 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Thu Mar 12 12:00:18 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/server/datanode/DataNode.java   | 109 ++++++++++++++++---
 .../hdfs/server/datanode/DataStorage.java       |  15 +--
 .../server/datanode/fsdataset/FsDatasetSpi.java |  11 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  81 ++++----------
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   3 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |  41 ++++---
 .../server/datanode/SimulatedFSDataset.java     |   6 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |  65 +++++++++++
 .../datanode/TestDataNodeVolumeFailure.java     |  70 ++++++++++++
 .../TestDataNodeVolumeFailureReporting.java     |  36 +++---
 .../extdataset/ExternalDatasetImpl.java         |   7 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |  16 +--
 13 files changed, 330 insertions(+), 133 deletions(-)
----------------------------------------------------------------------
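
At a high level the patch changes FsDatasetSpi.checkDataDir() to return the
set of failed volume roots, and DataNode#checkDiskError then removes those
volumes from both the dataset and DataStorage (keeping the failure record)
before reporting the disk error. A compressed, illustrative sketch of that
control flow (the nested interfaces below are stand-ins for FsDatasetSpi and
DataStorage, not the real types):

    import java.io.File;
    import java.io.IOException;
    import java.util.Set;

    final class FailedVolumeHandler {
      // Stand-ins for the real FsDatasetSpi / DataStorage methods used here.
      interface Dataset {
        Set<File> checkDataDir();
        void removeVolumes(Set<File> roots, boolean clearFailure);
      }
      interface Storage {
        void removeVolumes(Set<File> roots) throws IOException;
      }

      // Sketch of the new DataNode#checkDiskError flow.
      static void checkDiskError(Dataset data, Storage storage) {
        Set<File> unhealthy = data.checkDataDir();  // null/empty means healthy
        if (unhealthy == null || unhealthy.isEmpty()) {
          return;
        }
        // clearFailure=false keeps the failure info for metrics and reports.
        data.removeVolumes(unhealthy, false);
        try {
          storage.removeVolumes(unhealthy);
        } catch (IOException e) {
          System.err.println("Error removing unhealthy storage dirs: " + e);
        }
        StringBuilder sb = new StringBuilder("DataNode failed volumes:");
        for (File dir : unhealthy) {
          sb.append(dir.getAbsolutePath()).append(';');
        }
        // The real DataNode passes this string to handleDiskError(...).
        System.err.println(sb);
      }
    }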


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e52b849..153453c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1162,6 +1162,9 @@ Release 2.7.0 - UNRELEASED
       HDFS-7806. Refactor: move StorageType from hadoop-hdfs to
       hadoop-common. (Xiaoyu Yao via Arpit Agarwal)
 
+      HDFS-7722. DataNode#checkDiskError should also remove Storage when error
+      is found. (Lei Xu via Colin P. McCabe)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 92ddb7b..5be6a6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -53,6 +53,7 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
+import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -73,6 +74,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -613,20 +615,16 @@ public class DataNode extends ReconfigurableBase
             errorMessageBuilder.append(
                 String.format("FAILED to ADD: %s: %s%n", volume,
                               e.toString()));
+            LOG.error("Failed to add volume: " + volume, e);
           }
         }
       }
 
-      if (!changedVolumes.deactivateLocations.isEmpty()) {
-        LOG.info("Deactivating volumes: " +
-            Joiner.on(",").join(changedVolumes.deactivateLocations));
-
-        data.removeVolumes(changedVolumes.deactivateLocations);
-        try {
-          storage.removeVolumes(changedVolumes.deactivateLocations);
-        } catch (IOException e) {
-          errorMessageBuilder.append(e.getMessage());
-        }
+      try {
+        removeVolumes(changedVolumes.deactivateLocations);
+      } catch (IOException e) {
+        errorMessageBuilder.append(e.getMessage());
+        LOG.error("Failed to remove volume: " + e.getMessage(), e);
       }
 
       if (errorMessageBuilder.length() > 0) {
@@ -639,6 +637,79 @@ public class DataNode extends ReconfigurableBase
     }
   }
 
+  /**
+   * Remove volumes from DataNode.
+   * See {@link removeVolumes(final Set<File>, boolean)} for details.
+   *
+   * @param locations the StorageLocations of the volumes to be removed.
+   * @throws IOException
+   */
+  private void removeVolumes(final Collection<StorageLocation> locations)
+    throws IOException {
+    if (locations.isEmpty()) {
+      return;
+    }
+    Set<File> volumesToRemove = new HashSet<>();
+    for (StorageLocation loc : locations) {
+      volumesToRemove.add(loc.getFile().getAbsoluteFile());
+    }
+    removeVolumes(volumesToRemove, true);
+  }
+
+  /**
+   * Remove volumes from DataNode.
+   *
+   * It does three things:
+   * <li>
+   *   <ul>Remove volumes and block info from FsDataset.</ul>
+   *   <ul>Remove volumes from DataStorage.</ul>
+   *   <ul>Reset configuration DATA_DIR and {@link dataDirs} to represent
+   *   active volumes.</ul>
+   * </li>
+   * @param absoluteVolumePaths the absolute path of volumes.
+   * @param clearFailure if true, clears the failure information related to the
+   *                     volumes.
+   * @throws IOException
+   */
+  private synchronized void removeVolumes(
+      final Set<File> absoluteVolumePaths, boolean clearFailure)
+      throws IOException {
+    for (File vol : absoluteVolumePaths) {
+      Preconditions.checkArgument(vol.isAbsolute());
+    }
+
+    if (absoluteVolumePaths.isEmpty()) {
+      return;
+    }
+
+    LOG.info(String.format("Deactivating volumes (clear failure=%b): %s",
+        clearFailure, Joiner.on(",").join(absoluteVolumePaths)));
+
+    IOException ioe = null;
+    // Remove volumes and block infos from FsDataset.
+    data.removeVolumes(absoluteVolumePaths, clearFailure);
+
+    // Remove volumes from DataStorage.
+    try {
+      storage.removeVolumes(absoluteVolumePaths);
+    } catch (IOException e) {
+      ioe = e;
+    }
+
+    // Set configuration and dataDirs to reflect volume changes.
+    for (Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext(); ) {
+      StorageLocation loc = it.next();
+      if (absoluteVolumePaths.contains(loc.getFile().getAbsoluteFile())) {
+        it.remove();
+      }
+    }
+    conf.set(DFS_DATANODE_DATA_DIR_KEY, Joiner.on(",").join(dataDirs));
+
+    if (ioe != null) {
+      throw ioe;
+    }
+  }
+
   private synchronized void setClusterId(final String nsCid, final String bpid
       ) throws IOException {
     if(clusterId != null && !clusterId.equals(nsCid)) {
@@ -3076,10 +3147,20 @@ public class DataNode extends ReconfigurableBase
    * Check the disk error
    */
   private void checkDiskError() {
-    try {
-      data.checkDataDir();
-    } catch (DiskErrorException de) {
-      handleDiskError(de.getMessage());
+    Set<File> unhealthyDataDirs = data.checkDataDir();
+    if (unhealthyDataDirs != null && !unhealthyDataDirs.isEmpty()) {
+      try {
+        // Remove all unhealthy volumes from DataNode.
+        removeVolumes(unhealthyDataDirs, false);
+      } catch (IOException e) {
+        LOG.warn("Error occurred when removing unhealthy storage dirs: "
+            + e.getMessage(), e);
+      }
+      StringBuilder sb = new StringBuilder("DataNode failed volumes:");
+      for (File dataDir : unhealthyDataDirs) {
+        sb.append(dataDir.getAbsolutePath() + ";");
+      }
+      handleDiskError(sb.toString());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 001f684..f979d3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -404,28 +404,23 @@ public class DataStorage extends Storage {
   }
 
   /**
-   * Remove volumes from DataStorage. All volumes are removed even when the
+   * Remove storage dirs from DataStorage. All storage dirs are removed even when the
    * IOException is thrown.
    *
-   * @param locations a collection of volumes.
+   * @param dirsToRemove a set of storage directories to be removed.
    * @throws IOException if I/O error when unlocking storage directory.
    */
-  synchronized void removeVolumes(Collection<StorageLocation> locations)
+  synchronized void removeVolumes(final Set<File> dirsToRemove)
       throws IOException {
-    if (locations.isEmpty()) {
+    if (dirsToRemove.isEmpty()) {
       return;
     }
 
-    Set<File> dataDirs = new HashSet<File>();
-    for (StorageLocation sl : locations) {
-      dataDirs.add(sl.getFile());
-    }
-
     StringBuilder errorMsgBuilder = new StringBuilder();
     for (Iterator<StorageDirectory> it = this.storageDirs.iterator();
          it.hasNext(); ) {
       StorageDirectory sd = it.next();
-      if (dataDirs.contains(sd.getRoot())) {
+      if (dirsToRemove.contains(sd.getRoot())) {
         // Remove the block pool level storage first.
         for (Map.Entry<String, BlockPoolSliceStorage> entry :
             this.bpStorageMap.entrySet()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 5b183e6..6f4da09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -27,6 +27,7 @@ import java.io.InputStream;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -113,9 +114,11 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * If the FSDataset supports block scanning, this function removes
    * the volumes from the block scanner.
    *
-   * @param volumes      The storage locations of the volumes to remove.
+   * @param volumes  The paths of the volumes to be removed.
+   * @param clearFailure set true to clear the failure information about the
+   *                     volumes.
    */
-  public void removeVolumes(Collection<StorageLocation> volumes);
+  public void removeVolumes(Set<File> volumes, boolean clearFailure);
 
   /** @return a storage with the given storage ID */
   public DatanodeStorage getStorage(final String storageUuid);
@@ -388,9 +391,9 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
 
     /**
      * Check if all the data directories are healthy
-     * @throws DiskErrorException
+     * @return A set of unhealthy data directories.
      */
-  public void checkDataDir() throws DiskErrorException;
+  public Set<File> checkDataDir();
 
   /**
    * Shutdown the FSDataset

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 48ac6ca..486acbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -445,41 +445,42 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   /**
-   * Removes a collection of volumes from FsDataset.
-   * @param volumes the root directories of the volumes.
+   * Removes a set of volumes from FsDataset.
+   * @param volumesToRemove a set of absolute root path of each volume.
+   * @param clearFailure set true to clear failure information.
    *
    * DataNode should call this function before calling
    * {@link DataStorage#removeVolumes(java.util.Collection)}.
    */
   @Override
-  public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-    Set<String> volumeSet = new HashSet<>();
-    for (StorageLocation sl : volumes) {
-      volumeSet.add(sl.getFile().getAbsolutePath());
+  public synchronized void removeVolumes(
+      Set<File> volumesToRemove, boolean clearFailure) {
+    // Make sure that all volumes are absolute path.
+    for (File vol : volumesToRemove) {
+      Preconditions.checkArgument(vol.isAbsolute(),
+          String.format("%s is not absolute path.", vol.getPath()));
     }
     for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
       Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-      String volume = sd.getRoot().getAbsolutePath();
-      if (volumeSet.contains(volume)) {
-        LOG.info("Removing " + volume + " from FsDataset.");
+      final File absRoot = sd.getRoot().getAbsoluteFile();
+      if (volumesToRemove.contains(absRoot)) {
+        LOG.info("Removing " + absRoot + " from FsDataset.");
 
         // Disable the volume from the service.
         asyncDiskService.removeVolume(sd.getCurrentDir());
-        this.volumes.removeVolume(sd.getRoot());
+        volumes.removeVolume(absRoot, clearFailure);
 
         // Removed all replica information for the blocks on the volume. Unlike
         // updating the volumeMap in addVolume(), this operation does not scan
         // disks.
         for (String bpid : volumeMap.getBlockPoolList()) {
-          List<Block> blocks = new ArrayList<Block>();
           for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-              it.hasNext(); ) {
+               it.hasNext(); ) {
             ReplicaInfo block = it.next();
-            String absBasePath =
-                  new File(block.getVolume().getBasePath()).getAbsolutePath();
-            if (absBasePath.equals(volume)) {
+            final File absBasePath =
+                new File(block.getVolume().getBasePath()).getAbsoluteFile();
+            if (absBasePath.equals(absRoot)) {
               invalidate(bpid, block);
-              blocks.add(block);
               it.remove();
             }
           }
@@ -1975,50 +1976,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   /**
    * check if a data directory is healthy
-   * if some volumes failed - make sure to remove all the blocks that belong
-   * to these volumes
-   * @throws DiskErrorException
+   *
+   * if some volumes failed - the caller must remove all the blocks that belong
+   * to these failed volumes.
+   * @return the failed volumes. Returns null if no volume failed.
    */
   @Override // FsDatasetSpi
-  public void checkDataDir() throws DiskErrorException {
-    long totalBlocks=0, removedBlocks=0;
-    List<FsVolumeImpl> failedVols =  volumes.checkDirs();
-    
-    // If there no failed volumes return
-    if (failedVols == null) { 
-      return;
-    }
-    
-    // Otherwise remove blocks for the failed volumes
-    long mlsec = Time.now();
-    synchronized (this) {
-      for (FsVolumeImpl fv: failedVols) {
-        for (String bpid : fv.getBlockPoolList()) {
-          Iterator<ReplicaInfo> ib = volumeMap.replicas(bpid).iterator();
-          while(ib.hasNext()) {
-            ReplicaInfo b = ib.next();
-            totalBlocks++;
-            // check if the volume block belongs to still valid
-            if(b.getVolume() == fv) {
-              LOG.warn("Removing replica " + bpid + ":" + b.getBlockId()
-                  + " on failed volume " + fv.getCurrentDir().getAbsolutePath());
-              ib.remove();
-              removedBlocks++;
-            }
-          }
-        }
-      }
-    } // end of sync
-    mlsec = Time.now() - mlsec;
-    LOG.warn("Removed " + removedBlocks + " out of " + totalBlocks +
-        "(took " + mlsec + " millisecs)");
-
-    // report the error
-    StringBuilder sb = new StringBuilder();
-    for (FsVolumeImpl fv : failedVols) {
-      sb.append(fv.getCurrentDir().getAbsolutePath() + ";");
-    }
-    throw new DiskErrorException("DataNode failed volumes:" + sb);
+  public Set<File> checkDataDir() {
+   return volumes.checkDirs();
   }
     
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 744db62..23efbdf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -289,7 +289,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
     }
   }
 
-  long getDfsUsed() throws IOException {
+  @VisibleForTesting
+  public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
     synchronized(dataset) {
       for(BlockPoolSlice s : bpSlices.values()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index b38d41f..a5611c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -24,10 +24,12 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
 import com.google.common.collect.Lists;
@@ -218,16 +220,15 @@ class FsVolumeList {
   }
 
   /**
-   * Calls {@link FsVolumeImpl#checkDirs()} on each volume, removing any
-   * volumes from the active list that result in a DiskErrorException.
+   * Calls {@link FsVolumeImpl#checkDirs()} on each volume.
    * 
    * Use checkDirsMutext to allow only one instance of checkDirs() call
    *
-   * @return list of all the removed volumes.
+   * @return list of all the failed volumes.
    */
-  List<FsVolumeImpl> checkDirs() {
+  Set<File> checkDirs() {
     synchronized(checkDirsMutex) {
-      ArrayList<FsVolumeImpl> removedVols = null;
+      Set<File> failedVols = null;
       
       // Make a copy of volumes for performing modification 
       final List<FsVolumeImpl> volumeList = getVolumes();
@@ -238,12 +239,12 @@ class FsVolumeList {
           fsv.checkDirs();
         } catch (DiskErrorException e) {
           FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ", e);
-          if (removedVols == null) {
-            removedVols = new ArrayList<>(1);
+          if (failedVols == null) {
+            failedVols = new HashSet<>(1);
           }
-          removedVols.add(fsv);
-          removeVolume(fsv);
+          failedVols.add(new File(fsv.getBasePath()).getAbsoluteFile());
           addVolumeFailureInfo(fsv);
+          removeVolume(fsv);
         } catch (ClosedChannelException e) {
           FsDatasetImpl.LOG.debug("Caught exception when obtaining " +
             "reference count on closed volume", e);
@@ -252,12 +253,12 @@ class FsVolumeList {
         }
       }
       
-      if (removedVols != null && removedVols.size() > 0) {
-        FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size()
-            + " volumes. Current volumes: " + this);
+      if (failedVols != null && failedVols.size() > 0) {
+        FsDatasetImpl.LOG.warn("Completed checkDirs. Found " + failedVols.size()
+            + " failure volumes.");
       }
 
-      return removedVols;
+      return failedVols;
     }
   }
 
@@ -290,6 +291,9 @@ class FsVolumeList {
     if (blockScanner != null) {
       blockScanner.addVolumeScanner(ref);
     }
+    // If the volume is used to replace a failed volume, it needs to reset the
+    // volume failure info for this volume.
+    removeVolumeFailureInfo(new File(ref.getVolume().getBasePath()));
     FsDatasetImpl.LOG.info("Added new volume: " +
         ref.getVolume().getStorageID());
   }
@@ -337,8 +341,9 @@ class FsVolumeList {
   /**
    * Dynamically remove volume in the list.
    * @param volume the volume to be removed.
+   * @param clearFailure set true to remove failure info for this volume.
    */
-  void removeVolume(File volume) {
+  void removeVolume(File volume, boolean clearFailure) {
     // Make a copy of volumes to remove one volume.
     final FsVolumeImpl[] curVolumes = volumes.get();
     final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
@@ -352,7 +357,9 @@ class FsVolumeList {
         removeVolume(fsVolume);
       }
     }
-    removeVolumeFailureInfo(volume);
+    if (clearFailure) {
+      removeVolumeFailureInfo(volume);
+    }
   }
 
   VolumeFailureInfo[] getVolumeFailureInfos() {
@@ -366,7 +373,9 @@ class FsVolumeList {
   }
 
   private void addVolumeFailureInfo(FsVolumeImpl vol) {
-    addVolumeFailureInfo(new VolumeFailureInfo(vol.getBasePath(), Time.now(),
+    addVolumeFailureInfo(new VolumeFailureInfo(
+        new File(vol.getBasePath()).getAbsolutePath(),
+        Time.now(),
         vol.getCapacity()));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index f0dbd0f..a4ec8d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -30,6 +30,7 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
@@ -959,8 +960,9 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public void checkDataDir() throws DiskErrorException {
+  public Set<File> checkDataDir() {
     // nothing to check for simulated data set
+    return null;
   }
 
   @Override // FsDatasetSpi
@@ -1281,7 +1283,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override
-  public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
+  public synchronized void removeVolumes(Set<File> volumes, boolean clearFailure) {
     throw new UnsupportedOperationException();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index ac316e8..466598b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockMissingException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -95,6 +97,8 @@ public class TestDataNodeHotSwapVolumes {
     conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
         1000);
+    /* Allow 1 volume failure */
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
 
     MiniDFSNNTopology nnTopology =
         MiniDFSNNTopology.simpleFederatedTopology(numNameNodes);
@@ -646,4 +650,65 @@ public class TestDataNodeHotSwapVolumes {
     // this directory were removed from the previous step.
     dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
   }
+
+  /** Get the FsVolume on the given basePath */
+  private FsVolumeImpl getVolume(DataNode dn, File basePath) {
+    for (FsVolumeSpi vol : dn.getFSDataset().getVolumes()) {
+      if (vol.getBasePath().equals(basePath.getPath())) {
+        return (FsVolumeImpl)vol;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Verify that {@link DataNode#checkDiskErrors()} removes all metadata in the
+   * DataNode upon a volume failure, so that reconfiguring with the same
+   * configuration reloads a new volume on the same directory as the failed one.
+   */
+  @Test(timeout=60000)
+  public void testDirectlyReloadAfterCheckDiskError()
+      throws IOException, TimeoutException, InterruptedException,
+      ReconfigurationException {
+    startDFSCluster(1, 2);
+    createFile(new Path("/test"), 32, (short)2);
+
+    DataNode dn = cluster.getDataNodes().get(0);
+    final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
+    File dirToFail = new File(cluster.getDataDirectory(), "data1");
+
+    FsVolumeImpl failedVolume = getVolume(dn, dirToFail);
+    assertTrue("No FsVolume was found for " + dirToFail,
+        failedVolume != null);
+    long used = failedVolume.getDfsUsed();
+
+    try {
+      assertTrue("Couldn't chmod local vol: " + dirToFail,
+          FileUtil.setExecutable(dirToFail, false));
+      // Trigger the disk check and wait for the DataNode to detect the disk failure.
+      long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
+      dn.checkDiskErrorAsync();
+      while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
+        Thread.sleep(100);
+      }
+
+      createFile(new Path("/test1"), 32, (short)2);
+      assertEquals(used, failedVolume.getDfsUsed());
+    } finally {
+      // Restore the permissions on dirToFail. Otherwise, if an exception is
+      // thrown above, the following tests cannot delete this data directory
+      // and thus fail to start MiniDFSCluster.
+      assertTrue("Couldn't restore executable for: " + dirToFail,
+          FileUtil.setExecutable(dirToFail, true));
+    }
+
+    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
+
+    createFile(new Path("/test2"), 32, (short)2);
+    FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
+    assertTrue(restoredVolume != null);
+    assertTrue(restoredVolume != failedVolume);
+    // More data has been written to this volume.
+    assertTrue(restoredVolume.getDfsUsed() > used);
+  }
 }
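
The wait pattern used in testDirectlyReloadAfterCheckDiskError (trigger checkDiskErrorAsync(), then spin until getLastDiskErrorCheck() moves) generalizes to a small helper. This is only an illustrative sketch with a made-up name and an added timeout, not code from the patch:

import java.util.concurrent.TimeoutException;
import java.util.function.LongSupplier;

public class DiskCheckWait {
  // Poll a monotonically updated timestamp until it differs from the value
  // observed before the asynchronous check was triggered.
  static void waitForNewCheck(LongSupplier lastCheck, long previous, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (lastCheck.getAsLong() == previous) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("disk error check did not complete in time");
      }
      Thread.sleep(100);   // same polling interval as the test
    }
  }
}

A call such as waitForNewCheck(dn::getLastDiskErrorCheck, lastDiskErrorCheck, 60000) would replace the bare while loop shown in the test.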

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index d9ad96b..ba786d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
@@ -31,6 +33,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,6 +60,10 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -201,6 +208,69 @@ public class TestDataNodeVolumeFailure {
   }
 
   /**
+   * Test that DataStorage and BlockPoolSliceStorage remove the failed volume
+   * after failure.
+   */
+  @Test(timeout=150000)
+  public void testFailedVolumeBeingRemovedFromDataNode()
+      throws InterruptedException, IOException, TimeoutException {
+    Path file1 = new Path("/test1");
+    DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
+    DFSTestUtil.waitReplication(fs, file1, (short) 2);
+
+    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
+    assertTrue(FileUtil.setExecutable(dn0Vol1, false));
+    DataNode dn0 = cluster.getDataNodes().get(0);
+    long lastDiskErrorCheck = dn0.getLastDiskErrorCheck();
+    dn0.checkDiskErrorAsync();
+    // Wait for the checkDiskError thread to finish and discover the volume failure.
+    while (dn0.getLastDiskErrorCheck() == lastDiskErrorCheck) {
+      Thread.sleep(100);
+    }
+
+    // Verify dn0Vol1 has been completely removed from DN0.
+    // 1. dn0Vol1 is removed from DataStorage.
+    DataStorage storage = dn0.getStorage();
+    assertEquals(1, storage.getNumStorageDirs());
+    for (int i = 0; i < storage.getNumStorageDirs(); i++) {
+      Storage.StorageDirectory sd = storage.getStorageDir(i);
+      assertFalse(sd.getRoot().getAbsolutePath().startsWith(
+          dn0Vol1.getAbsolutePath()
+      ));
+    }
+    final String bpid = cluster.getNamesystem().getBlockPoolId();
+    BlockPoolSliceStorage bpsStorage = storage.getBPStorage(bpid);
+    assertEquals(1, bpsStorage.getNumStorageDirs());
+    for (int i = 0; i < bpsStorage.getNumStorageDirs(); i++) {
+      Storage.StorageDirectory sd = bpsStorage.getStorageDir(i);
+      assertFalse(sd.getRoot().getAbsolutePath().startsWith(
+          dn0Vol1.getAbsolutePath()
+      ));
+    }
+
+    // 2. dn0Vol1 is removed from FsDataset
+    FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
+    for (FsVolumeSpi volume : data.getVolumes()) {
+      assertNotEquals(new File(volume.getBasePath()).getAbsoluteFile(),
+          dn0Vol1.getAbsoluteFile());
+    }
+
+    // 3. all blocks on dn0Vol1 have been removed.
+    for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
+      assertNotNull(replica.getVolume());
+      assertNotEquals(
+          new File(replica.getVolume().getBasePath()).getAbsoluteFile(),
+          dn0Vol1.getAbsoluteFile());
+    }
+
+    // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.
+    String[] dataDirStrs =
+        dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
+    assertEquals(1, dataDirStrs.length);
+    assertFalse(dataDirStrs[0].contains(dn0Vol1.getAbsolutePath()));
+  }
+
+  /**
    * Test that there are under replication blocks after vol failures
    */
   @Test
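
The assertions above prove that no remaining storage directory sits under the failed volume by comparing absolute-path prefixes. Below is a hedged, self-contained sketch of an equivalent check (the class and method names are illustrative, not from the patch); walking the parent chain sidesteps the corner case where a plain startsWith() would also match a sibling such as /data10 when the root is /data1:

import java.io.File;

public class PathUnderRoot {
  // Returns true if dir equals root or lives anywhere underneath it.
  static boolean isUnder(File dir, File root) {
    File current = dir.getAbsoluteFile();
    File target = root.getAbsoluteFile();
    while (current != null) {
      if (current.equals(target)) {
        return true;
      }
      current = current.getParentFile();
    }
    return false;
  }
}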

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index a8f7990..788ddb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -403,23 +403,6 @@ public class TestDataNodeVolumeFailureReporting {
     checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
     checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
 
-    // Reconfigure each DataNode to remove its failed volumes.
-    reconfigureDataNode(dns.get(0), dn1Vol2);
-    reconfigureDataNode(dns.get(1), dn2Vol2);
-
-    DataNodeTestUtils.triggerHeartbeat(dns.get(0));
-    DataNodeTestUtils.triggerHeartbeat(dns.get(1));
-
-    checkFailuresAtDataNode(dns.get(0), 1, true);
-    checkFailuresAtDataNode(dns.get(1), 1, true);
-
-    // NN sees reduced capacity, but no volume failures.
-    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0,
-        origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
-    checkAggregateFailuresAtNameNode(true, 0);
-    checkFailuresAtNameNode(dm, dns.get(0), true);
-    checkFailuresAtNameNode(dm, dns.get(1), true);
-
     // Reconfigure again to try to add back the failed volumes.
     reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
     reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
@@ -460,6 +443,25 @@ public class TestDataNodeVolumeFailureReporting {
     checkAggregateFailuresAtNameNode(false, 2);
     checkFailuresAtNameNode(dm, dns.get(0), false, dn1Vol1.getAbsolutePath());
     checkFailuresAtNameNode(dm, dns.get(1), false, dn2Vol1.getAbsolutePath());
+
+    // Replace the failed volume with a healthy one and reconfigure the DataNode.
+    // The failed volume information should be cleared.
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
+    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
+    reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
+
+    DataNodeTestUtils.triggerHeartbeat(dns.get(0));
+    DataNodeTestUtils.triggerHeartbeat(dns.get(1));
+
+    checkFailuresAtDataNode(dns.get(0), 1, true);
+    checkFailuresAtDataNode(dns.get(1), 1, true);
+
+    DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0,
+        origCapacity, WAIT_FOR_HEARTBEATS);
+    checkAggregateFailuresAtNameNode(true, 0);
+    checkFailuresAtNameNode(dm, dns.get(0), true);
+    checkFailuresAtNameNode(dm, dns.get(1), true);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 6653cca..5a440c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -61,8 +61,7 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void removeVolumes(Collection<StorageLocation> volumes) {
-
+  public void removeVolumes(Set<File> volumes, boolean clearFailure) {
   }
 
   @Override
@@ -243,8 +242,8 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void checkDataDir() throws DiskErrorException {
-    throw new DiskChecker.DiskErrorException(null);
+  public Set<File> checkDataDir() {
+    return null;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b49c3a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 403cb2a..8654773 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -195,10 +195,10 @@ public class TestFsDatasetImpl {
     final String[] dataDirs =
         conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
     final String volumePathToRemove = dataDirs[0];
-    List<StorageLocation> volumesToRemove = new ArrayList<StorageLocation>();
-    volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
+    Set<File> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(StorageLocation.parse(volumePathToRemove).getFile());
 
-    dataset.removeVolumes(volumesToRemove);
+    dataset.removeVolumes(volumesToRemove, true);
     int expectedNumVolumes = dataDirs.length - 1;
     assertEquals("The volume has been removed from the volumeList.",
         expectedNumVolumes, dataset.getVolumes().size());
@@ -206,7 +206,7 @@ public class TestFsDatasetImpl {
         expectedNumVolumes, dataset.storageMap.size());
 
     try {
-      dataset.asyncDiskService.execute(volumesToRemove.get(0).getFile(),
+      dataset.asyncDiskService.execute(volumesToRemove.iterator().next(),
           new Runnable() {
             @Override
             public void run() {}
@@ -248,8 +248,9 @@ public class TestFsDatasetImpl {
 
     when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
     when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
-    List<StorageLocation> volumesToRemove = Arrays.asList(loc);
-    dataset.removeVolumes(volumesToRemove);
+    Set<File> volumesToRemove = new HashSet<>();
+    volumesToRemove.add(loc.getFile());
+    dataset.removeVolumes(volumesToRemove, true);
     assertEquals(numExistingVolumes, dataset.getVolumes().size());
   }
 
@@ -278,12 +279,13 @@ public class TestFsDatasetImpl {
     final FsVolumeImpl newVolume = mock(FsVolumeImpl.class);
     final FsVolumeReference newRef = mock(FsVolumeReference.class);
     when(newRef.getVolume()).thenReturn(newVolume);
+    when(newVolume.getBasePath()).thenReturn("data4");
     FsVolumeImpl blockedVolume = volumeList.getVolumes().get(1);
     doAnswer(new Answer() {
       @Override
       public Object answer(InvocationOnMock invocationOnMock)
           throws Throwable {
-        volumeList.removeVolume(new File("data4"));
+        volumeList.removeVolume(new File("data4"), false);
         volumeList.addVolume(newRef);
         return null;
       }


[25/49] hadoop git commit: HDFS-6806. HDFS Rolling upgrade document should mention the versions available. Contributed by J.Andreina.

Posted by zj...@apache.org.
HDFS-6806. HDFS Rolling upgrade document should mention the versions available. Contributed by J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82db3341
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82db3341
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82db3341

Branch: refs/heads/YARN-2928
Commit: 82db3341bfb344f10c4c6cc8eea0d8c19e05956a
Parents: d6e05c5
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Mar 10 11:22:11 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Mar 10 11:22:11 2015 +0900

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml             | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82db3341/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 094abfe..a2e552a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -737,6 +737,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
     (Tsz Wo Nicholas Sze via jing9)
 
+    HDFS-6806. HDFS Rolling upgrade document should mention the versions
+    available. (J.Andreina via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82db3341/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
index f2f3ebe..28649a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
@@ -47,7 +47,9 @@
     These two capabilities make it feasible to upgrade HDFS without incurring HDFS downtime.
     In order to upgrade a HDFS cluster without downtime, the cluster must be setup with HA.
   </p>
-
+  <p>
+    Note that rolling upgrade is supported only from Hadoop-2.4.0 onwards.
+  </p>
   <subsection name="Upgrade without Downtime" id="UpgradeWithoutDowntime">
   <p>
     In a HA cluster, there are two or more <em>NameNodes (NNs)</em>, many <em>DataNodes (DNs)</em>,


[07/49] hadoop git commit: YARN-2190. Added CPU and memory limit options to the default container executor for Windows containers. Contributed by Chuan Liu

Posted by zj...@apache.org.
YARN-2190. Added CPU and memory limit options to the default container executor for Windows containers. Contributed by Chuan Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21101c01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21101c01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21101c01

Branch: refs/heads/YARN-2928
Commit: 21101c01f242439ec8ec40fb3a9ab1991ae0adc7
Parents: 01bfe6f
Author: Jian He <ji...@apache.org>
Authored: Fri Mar 6 14:17:57 2015 -0800
Committer: Jian He <ji...@apache.org>
Committed: Fri Mar 6 14:18:11 2015 -0800

----------------------------------------------------------------------
 BUILDING.txt                                    |   9 +-
 .../hadoop-common/src/main/winutils/task.c      | 144 ++++++++++++++++---
 .../src/main/winutils/win8sdk.props             |  28 ++++
 .../src/main/winutils/winutils.vcxproj          |   3 +
 .../org/apache/hadoop/util/TestWinUtils.java    |  62 ++++++++
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  12 ++
 .../src/main/resources/yarn-default.xml         |  14 ++
 .../server/nodemanager/ContainerExecutor.java   |  49 ++++++-
 .../nodemanager/DefaultContainerExecutor.java   |   9 +-
 .../WindowsSecureContainerExecutor.java         |   9 +-
 .../nodemanager/TestContainerExecutor.java      |  53 +++++++
 12 files changed, 360 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 6e38ad3..b60da6c 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -209,7 +209,8 @@ Requirements:
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer
-* Windows SDK or Visual Studio 2010 Professional
+* Windows SDK 7.1 or Visual Studio 2010 Professional
+* Windows SDK 8.1 (if building CPU rate control for the container executor)
 * zlib headers (if building native code bindings for zlib)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 * Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
@@ -220,11 +221,15 @@ can be downloaded from http://git-scm.com/download/win.
 
 If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
 Do not use Visual Studio Express.  It does not support compiling for 64-bit,
-which is problematic if running a 64-bit system.  The Windows SDK is free to
+which is problematic if running a 64-bit system.  The Windows SDK 7.1 is free to
 download here:
 
 http://www.microsoft.com/en-us/download/details.aspx?id=8279
 
+The Windows SDK 8.1 is available to download at:
+
+http://msdn.microsoft.com/en-us/windows/bg162891.aspx
+
 Cygwin is neither required nor supported.
 
 ----------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-common-project/hadoop-common/src/main/winutils/task.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/task.c b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
index 21b1893..37c6ca1 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/task.c
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
@@ -49,6 +49,31 @@ typedef enum TaskCommandOptionType
   TaskProcessList
 } TaskCommandOption;
 
+//----------------------------------------------------------------------------
+// Function: GetLimit
+//
+// Description:
+//  Parse the resource limit value from the command line argument as a long.
+//
+// Returns:
+// TRUE: if the value was parsed successfully
+// FALSE: otherwise
+static BOOL GetLimit(__in const wchar_t *str, __out long *value)
+{
+  wchar_t *end = NULL;
+  if (str == NULL || value == NULL) return FALSE;
+  *value = wcstol(str, &end, 10);
+  if (end == NULL || *end != '\0')
+  {
+    *value = -1;
+    return FALSE;
+  }
+  else
+  {
+    return TRUE;
+  }
+}
+
 //----------------------------------------------------------------------------
 // Function: ParseCommandLine
 //
@@ -61,7 +86,9 @@ typedef enum TaskCommandOptionType
 // FALSE: otherwise
 static BOOL ParseCommandLine(__in int argc,
                              __in_ecount(argc) wchar_t *argv[],
-                             __out TaskCommandOption *command)
+                             __out TaskCommandOption *command,
+                             __out_opt long *memory,
+                             __out_opt long *vcore)
 {
   *command = TaskInvalid;
 
@@ -88,9 +115,44 @@ static BOOL ParseCommandLine(__in int argc,
     }
   }
 
-  if (argc == 4) {
+  if (argc >= 4 && argc <= 8) {
     if (wcscmp(argv[1], L"create") == 0)
     {
+      int i;
+      for (i = 2; i < argc - 3; i++)
+      {
+        if (wcscmp(argv[i], L"-c") == 0)
+        {
+          if (vcore != NULL && !GetLimit(argv[i + 1], vcore))
+          {
+            return FALSE;
+          }
+          else
+          {
+            i++;
+            continue;
+          }
+        }
+        else if (wcscmp(argv[i], L"-m") == 0)
+        {
+          if (memory != NULL && !GetLimit(argv[i + 1], memory))
+          {
+            return FALSE;
+          }
+          else
+          {
+            i++;
+            continue;
+          }
+        }
+        else
+        {
+          break;
+        }
+      }
+      if (argc - i != 2)
+        return FALSE;
+
       *command = TaskCreate;
       return TRUE;
     }
@@ -573,7 +635,7 @@ done:
 // ERROR_SUCCESS: On success
 // GetLastError: otherwise
 DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PCWSTR cmdLine, 
-  __in LPCWSTR userName) 
+  __in LPCWSTR userName, __in long memory, __in long cpuRate)
 {
   DWORD dwErrorCode = ERROR_SUCCESS;
   DWORD exitCode = EXIT_FAILURE;
@@ -616,6 +678,12 @@ DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PC
     return dwErrorCode;
   }
   jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+  if (memory > 0)
+  {
+    jeli.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY;
+    jeli.ProcessMemoryLimit = ((SIZE_T) memory) * 1024 * 1024;
+    jeli.JobMemoryLimit = ((SIZE_T) memory) * 1024 * 1024;
+  }
   if(SetInformationJobObject(jobObject, 
                              JobObjectExtendedLimitInformation, 
                              &jeli, 
@@ -626,6 +694,24 @@ DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PC
     CloseHandle(jobObject);
     return dwErrorCode;
   }
+#ifdef NTDDI_WIN8
+  if (cpuRate > 0)
+  {
+    JOBOBJECT_CPU_RATE_CONTROL_INFORMATION jcrci = { 0 };
+    SYSTEM_INFO sysinfo;
+    GetSystemInfo(&sysinfo);
+    jcrci.ControlFlags = JOB_OBJECT_CPU_RATE_CONTROL_ENABLE |
+      JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP;
+    jcrci.CpuRate = min(10000, cpuRate);
+    if(SetInformationJobObject(jobObject, JobObjectCpuRateControlInformation,
+          &jcrci, sizeof(jcrci)) == 0)
+    {
+      dwErrorCode = GetLastError();
+      CloseHandle(jobObject);
+      return dwErrorCode;
+    }
+  }
+#endif
 
   if (logonHandle != NULL) {
     dwErrorCode = AddNodeManagerAndUserACEsToObject(jobObject, userName, JOB_OBJECT_ALL_ACCESS);
@@ -809,10 +895,10 @@ create_process_done:
 // Returns:
 // ERROR_SUCCESS: On success
 // GetLastError: otherwise
-DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) 
+DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine, __in long memory, __in long cpuRate)
 {
   // call with null logon in order to create tasks utilizing the current logon
-  return CreateTaskImpl( NULL, jobObjName, cmdLine, NULL);
+  return CreateTaskImpl( NULL, jobObjName, cmdLine, NULL, memory, cpuRate);
 }
 
 //----------------------------------------------------------------------------
@@ -893,7 +979,7 @@ DWORD CreateTaskAsUser(__in PCWSTR jobObjName,
       goto done;
   }
 
-  err = CreateTaskImpl(logonHandle, jobObjName, cmdLine, user);
+  err = CreateTaskImpl(logonHandle, jobObjName, cmdLine, user, -1, -1);
 
 done: 
   if( profileIsLoaded ) {
@@ -1095,6 +1181,8 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
 {
   DWORD dwErrorCode = ERROR_SUCCESS;
   TaskCommandOption command = TaskInvalid;
+  long memory = -1;
+  long cpuRate = -1;
   wchar_t* cmdLine = NULL;
   wchar_t buffer[16*1024] = L""; // 32K max command line
   size_t charCountBufferLeft = sizeof(buffer)/sizeof(wchar_t);
@@ -1111,7 +1199,7 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
                ARGC_COMMAND_ARGS
        };
 
-  if (!ParseCommandLine(argc, argv, &command)) {
+  if (!ParseCommandLine(argc, argv, &command, &memory, &cpuRate)) {
     dwErrorCode = ERROR_INVALID_COMMAND_LINE;
 
     fwprintf(stderr, L"Incorrect command line arguments.\n\n");
@@ -1123,7 +1211,7 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
   {
     // Create the task jobobject
     //
-    dwErrorCode = CreateTask(argv[2], argv[3]);
+    dwErrorCode = CreateTask(argv[argc-2], argv[argc-1], memory, cpuRate);
     if (dwErrorCode != ERROR_SUCCESS)
     {
       ReportErrorCode(L"CreateTask", dwErrorCode);
@@ -1238,18 +1326,30 @@ void TaskUsage()
   // jobobject's are being used.
   // ProcessTree.isSetsidSupported()
   fwprintf(stdout, L"\
-    Usage: task create [TASKNAME] [COMMAND_LINE] |\n\
-          task createAsUser [TASKNAME] [USERNAME] [PIDFILE] [COMMAND_LINE] |\n\
-          task isAlive [TASKNAME] |\n\
-          task kill [TASKNAME]\n\
-          task processList [TASKNAME]\n\
-    Creates a new task jobobject with taskname\n\
-    Creates a new task jobobject with taskname as the user provided\n\
-    Checks if task jobobject is alive\n\
-    Kills task jobobject\n\
-    Prints to stdout a list of processes in the task\n\
-    along with their resource usage. One process per line\n\
-    and comma separated info per process\n\
-    ProcessId,VirtualMemoryCommitted(bytes),\n\
-    WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
+Usage: task create [OPTIONS] [TASKNAME] [COMMAND_LINE]\n\
+         Creates a new task job object with taskname and options to set CPU\n\
+         and memory limits on the job object\n\
+\n\
+         OPTIONS: -c [cpu rate] set the cpu rate limit on the job object.\n\
+                  -m [memory] set the memory limit on the job object.\n\
+         The cpu limit is an integral value of percentage * 100. The memory\n\
+         limit is an integral amount of memory in MB.\n\
+         A limit will not be set if zero or a negative value is passed as\n\
+         the parameter.\n\
+\n\
+       task createAsUser [TASKNAME] [USERNAME] [PIDFILE] [COMMAND_LINE]\n\
+         Creates a new task job object with taskname as the provided user\n\
+\n\
+       task isAlive [TASKNAME]\n\
+         Checks if task job object is alive\n\
+\n\
+       task kill [TASKNAME]\n\
+         Kills task job object\n\
+\n\
+       task processList [TASKNAME]\n\
+         Prints to stdout a list of processes in the task\n\
+         along with their resource usage. One process per line\n\
+         and comma separated info per process\n\
+         ProcessId,VirtualMemoryCommitted(bytes),\n\
+         WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
 }
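
For reference, the new options are driven from Java in the same way the TestWinUtils changes later in this commit do. The sketch below assumes winutils is available via Shell.WINUTILS and uses an arbitrary job name; the cpu rate is percentage * 100, so 5000 means a 50% hard cap, and -m is a limit in MB:

import java.io.IOException;
import org.apache.hadoop.util.Shell;

public class WinutilsTaskLimitsExample {
  public static void main(String[] args) throws IOException {
    // Create a job object named "job1" with a 128 MB memory limit and a 50% CPU cap.
    String out = Shell.execCommand(Shell.WINUTILS, "task", "create",
        "-m", "128", "-c", "5000", "job1", "cmd /c echo job1");
    System.out.println(out.trim());
  }
}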

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props b/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
new file mode 100644
index 0000000..503b37a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ImportGroup Label="PropertySheets" />
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+   <ExecutablePath>$(VCInstallDir)bin\x86_amd64;$(VCInstallDir)bin;$(WindowsSdkDir)bin\NETFX 4.0 Tools;$(MSBuildProgramFiles32)\Windows Kits\8.1\bin\x86;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;$(MSBuildProgramFiles32)\HTML Help Workshop;$(FrameworkSDKDir)\bin;$(MSBuildToolsPath32);$(VSInstallDir);$(SystemRoot)\SysWow64;$(FxCopDir);$(PATH)</ExecutablePath>
+   <IncludePath>$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(FrameworkSDKDir)\include;</IncludePath>
+   <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(MSBuildProgramFiles32)\Windows Kits\8.1\lib\win8\um\x64;$(MSBuildProgramFiles32)\Windows Kits\8.1\Lib\winv6.3\um\x64;$(FrameworkSDKDir)\lib\x64</LibraryPath>
+   <ExcludePath>$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(FrameworkSDKDir)\include;$(MSBuildToolsPath32);$(VCInstallDir)atlmfc\lib;$(VCInstallDir)lib;</ExcludePath>
+ </PropertyGroup>
+<ItemDefinitionGroup />
+</Project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
index 9ecba0a..76a7414 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
@@ -67,6 +67,9 @@
   </PropertyGroup>
   <ImportGroup Label="ExtensionSettings">
   </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="exists('$(MSBuildProgramFiles32)\Windows Kits\8.1')">
+    <Import Project="win8sdk.props" />
+  </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
   </ImportGroup>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
index 8ac6e40..987c706 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
@@ -547,4 +547,66 @@ public class TestWinUtils {
     
     assertThat(outNumber, containsString(testNumber));
   }
+
+  @Test (timeout = 30000)
+  public void testTaskCreateWithLimits() throws IOException {
+    // Generate a unique job id
+    String jobId = String.format("%f", Math.random());
+
+    // Run a task without any options
+    String out = Shell.execCommand(Shell.WINUTILS, "task", "create",
+        "job" + jobId, "cmd /c echo job" + jobId);
+    assertTrue(out.trim().equals("job" + jobId));
+
+    // Run a task without any limits
+    jobId = String.format("%f", Math.random());
+    out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+        "-1", "job" + jobId, "cmd /c echo job" + jobId);
+    assertTrue(out.trim().equals("job" + jobId));
+
+    // Run a task with limits (128MB should be enough for a cmd)
+    jobId = String.format("%f", Math.random());
+    out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "10000", "-m",
+        "128", "job" + jobId, "cmd /c echo job" + jobId);
+    assertTrue(out.trim().equals("job" + jobId));
+
+    // Run a task without enough memory
+    try {
+      jobId = String.format("%f", Math.random());
+      out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-m", "128", "job"
+          + jobId, "java -Xmx256m -version");
+      fail("Failed to get Shell.ExitCodeException with insufficient memory");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1));
+    }
+
+    // Run tasks with wrong parameters
+    //
+    try {
+      jobId = String.format("%f", Math.random());
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+          "-1", "foo", "job" + jobId, "cmd /c echo job" + jobId);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+
+    try {
+      jobId = String.format("%f", Math.random());
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-m", "-1",
+          "job" + jobId, "cmd /c echo job" + jobId);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+
+    try {
+      jobId = String.format("%f", Math.random());
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "foo",
+          "job" + jobId, "cmd /c echo job" + jobId);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d073169..c2aa2ef 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -363,6 +363,9 @@ Release 2.7.0 - UNRELEASED
     YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
     Xuan Gong via jianhe)
 
+    YARN-2190. Added CPU and memory limit options to the default container
+    executor for Windows containers. (Chuan Liu via jianhe)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 25b808e..8c83fea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1027,6 +1027,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY =
       20;
 
+  /**
+   * Indicates if memory and CPU limits will be set for the Windows Job
+   * Object for the containers launched by the default container executor.
+   */
+  public static final String NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED =
+      NM_PREFIX + "windows-container.memory-limit.enabled";
+  public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED = false;
+
+  public static final String NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED =
+      NM_PREFIX + "windows-container.cpu-limit.enabled";
+  public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED = false;
+
   /** 
   /* The Windows group that the windows-secure-container-executor should run as.
   */
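
Both new flags default to false, so the default container executor keeps its old behaviour unless they are switched on. A minimal sketch of toggling them programmatically (the surrounding class is illustrative; the constants are the ones added above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class WindowsLimitFlagsExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // When enabled, the default container executor passes real memory/cpu
    // values to winutils instead of -1 (no limit).
    conf.setBoolean(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, true);
    conf.setBoolean(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, true);
    System.out.println("memory limit enabled: " + conf.getBoolean(
        YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED,
        YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED));
  }
}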

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index df730d5..66400c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1075,6 +1075,20 @@
   </property>
 
   <property>
+    <description>This flag determines whether the memory limit will be set for the Windows Job
+    Object of the containers launched by the default container executor.</description>
+    <name>yarn.nodemanager.windows-container.memory-limit.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>This flag determines whether the CPU limit will be set for the Windows Job
+    Object of the containers launched by the default container executor.</description>
+    <name>yarn.nodemanager.windows-container.cpu-limit.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
     <description>T-file compression types used to compress aggregated logs.</description>
     <name>yarn.nodemanager.log-aggregation.compression-type</name>
     <value>none</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 77193df..248a393 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
@@ -298,6 +299,11 @@ public abstract class ContainerExecutor implements Configurable {
       readLock.unlock();
     }
   }
+
+  protected String[] getRunCommand(String command, String groupId,
+      String userName, Path pidFile, Configuration conf) {
+    return getRunCommand(command, groupId, userName, pidFile, conf, null);
+  }
   
   /** 
    *  Return a command to execute the given command in OS shell.
@@ -306,7 +312,7 @@ public abstract class ContainerExecutor implements Configurable {
    *  non-Windows, groupId is ignored. 
    */
   protected String[] getRunCommand(String command, String groupId,
-      String userName, Path pidFile, Configuration conf) {
+      String userName, Path pidFile, Configuration conf, Resource resource) {
     boolean containerSchedPriorityIsSet = false;
     int containerSchedPriorityAdjustment = 
         YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY;
@@ -320,7 +326,46 @@ public abstract class ContainerExecutor implements Configurable {
     }
   
     if (Shell.WINDOWS) {
-      return new String[] { Shell.WINUTILS, "task", "create", groupId,
+      int cpuRate = -1;
+      int memory = -1;
+      if (resource != null) {
+        if (conf
+            .getBoolean(
+                YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED,
+                YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) {
+          memory = resource.getMemory();
+        }
+
+        if (conf.getBoolean(
+            YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED,
+            YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED)) {
+          int containerVCores = resource.getVirtualCores();
+          int nodeVCores = conf.getInt(YarnConfiguration.NM_VCORES,
+              YarnConfiguration.DEFAULT_NM_VCORES);
+          // cap overall usage to the number of cores allocated to YARN
+          int nodeCpuPercentage = Math
+              .min(
+                  conf.getInt(
+                      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
+                      YarnConfiguration.DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT),
+                  100);
+          nodeCpuPercentage = Math.max(0, nodeCpuPercentage);
+          if (nodeCpuPercentage == 0) {
+            String message = "Illegal value for "
+                + YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
+                + ". Value cannot be less than or equal to 0.";
+            throw new IllegalArgumentException(message);
+          }
+          float yarnVCores = (nodeCpuPercentage * nodeVCores) / 100.0f;
+          // CPU should be set to a percentage * 100, e.g. 20% cpu rate limit
+          // should be set as 20 * 100. The following setting is equal to:
+          // 100 * (100 * (vcores / Total # of cores allocated to YARN))
+          cpuRate = Math.min(10000,
+              (int) ((containerVCores * 10000) / yarnVCores));
+        }
+      }
+      return new String[] { Shell.WINUTILS, "task", "create", "-m",
+          String.valueOf(memory), "-c", String.valueOf(cpuRate), groupId,
           "cmd /c " + command };
     } else {
       List<String> retCommand = new ArrayList<String>();
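
The cpu rate handed to winutils is derived from the container's vcores, the node's vcores and the percentage of physical CPU given to YARN. Below is a standalone sketch of just that arithmetic (plain ints instead of Configuration lookups; the class and method names are illustrative, not from the patch):

public class CpuRateSketch {
  // Rate is percentage * 100, hard-capped at 10000 (100% of the cores YARN may use).
  static int cpuRate(int containerVCores, int nodeVCores, int nodeCpuPercentage) {
    int pct = Math.max(0, Math.min(nodeCpuPercentage, 100));
    if (pct == 0) {
      throw new IllegalArgumentException("CPU percentage must be greater than 0");
    }
    float yarnVCores = (pct * nodeVCores) / 100.0f;   // cores available to YARN
    return Math.min(10000, (int) ((containerVCores * 10000) / yarnVCores));
  }

  public static void main(String[] args) {
    // 1 vcore on an 8-vcore node with 100% of the physical CPU for YARN -> 1250 (12.5%).
    System.out.println(cpuRate(1, 8, 100));
  }
}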

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index f3d2121..e0ecea3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.util.Shell.CommandExecutor;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -202,7 +203,7 @@ public class DefaultContainerExecutor extends ContainerExecutor {
       setScriptExecutable(sb.getWrapperScriptPath(), user);
 
       shExec = buildCommandExecutor(sb.getWrapperScriptPath().toString(),
-          containerIdStr, user, pidFile,
+          containerIdStr, user, pidFile, container.getResource(),
           new File(containerWorkDir.toUri().getPath()),
           container.getLaunchContext().getEnvironment());
       
@@ -256,12 +257,12 @@ public class DefaultContainerExecutor extends ContainerExecutor {
   }
 
   protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, 
-      String containerIdStr, String user, Path pidFile, File wordDir, 
-      Map<String, String> environment) 
+      String containerIdStr, String user, Path pidFile, Resource resource,
+      File wordDir, Map<String, String> environment)
           throws IOException {
     
     String[] command = getRunCommand(wrapperScriptPath,
-        containerIdStr, user, pidFile, this.getConf());
+        containerIdStr, user, pidFile, this.getConf(), resource);
 
       LOG.info("launchContainer: " + Arrays.toString(command));
       return new ShellCommandExecutor(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
index cd3e71a..b7bec5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.CommandExecutor;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
@@ -727,11 +728,9 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
    }
  
    @Override
-   protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, 
-       String containerIdStr,
-     String userName, Path pidFile,File wordDir, Map<String, String> environment) 
-     throws IOException {
-
+  protected CommandExecutor buildCommandExecutor(String wrapperScriptPath,
+      String containerIdStr, String userName, Path pidFile, Resource resource,
+      File wordDir, Map<String, String> environment) throws IOException {
      return new WintuilsProcessStubExecutor(
          wordDir.toString(),
          containerIdStr, userName, pidFile.toString(), 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21101c01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
index fd3634b..dc3e941 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
@@ -18,13 +18,21 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
+import java.util.Arrays;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 
+import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.junit.Assert;
 import org.junit.Test;
+
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
 public class TestContainerExecutor {
   
@@ -69,4 +77,49 @@ public class TestContainerExecutor {
     }
   }
 
+  @Test (timeout = 5000)
+  public void testRunCommandWithNoResources() {
+    // Windows only test
+    assumeTrue(Shell.WINDOWS);
+    Configuration conf = new Configuration();
+    String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
+        conf, Resource.newInstance(1024, 1));
+    // Assert the cpu and memory limits are set correctly in the command
+    String[] expected = { Shell.WINUTILS, "task", "create", "-m", "-1", "-c",
+        "-1", "group1", "cmd /c " + "echo" };
+    Assert.assertTrue(Arrays.equals(expected, command));
+  }
+
+  @Test (timeout = 5000)
+  public void testRunCommandWithMemoryOnlyResources() {
+    // Windows only test
+    assumeTrue(Shell.WINDOWS);
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
+    String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
+        conf, Resource.newInstance(1024, 1));
+    // Assert the cpu and memory limits are set correctly in the command
+    String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
+        "-1", "group1", "cmd /c " + "echo" };
+    Assert.assertTrue(Arrays.equals(expected, command));
+  }
+
+  @Test (timeout = 5000)
+  public void testRunCommandWithCpuAndMemoryResources() {
+    // Windows only test
+    assumeTrue(Shell.WINDOWS);
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, "true");
+    conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
+    String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
+        conf, Resource.newInstance(1024, 1));
+    float yarnProcessors = NodeManagerHardwareUtils.getContainersCores(
+        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf),
+        conf);
+    int cpuRate = Math.min(10000, (int) ((1 * 10000) / yarnProcessors));
+    // Assert the cpu and memory limits are set correctly in the command
+    String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
+        String.valueOf(cpuRate), "group1", "cmd /c " + "echo" };
+    Assert.assertTrue(Arrays.equals(expected, command));
+  }
 }
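
The new tests above recompute the winutils CPU rate the same way the run command does: the requested vcores are scaled against the cores available to containers on the node and capped at 10000, where 10000 stands for the whole node. A minimal standalone sketch of that arithmetic, with a plain float standing in for the value NodeManagerHardwareUtils.getContainersCores(...) would return:

  // Sketch only: mirrors the cpuRate computation exercised by the tests above.
  // "yarnProcessors" is an assumed value, not the real hardware lookup.
  public class CpuRateSketch {
    static int cpuRate(int requestedVcores, float yarnProcessors) {
      // winutils "task create -c <rate>" takes a rate out of 10000 (full node).
      return Math.min(10000, (int) ((requestedVcores * 10000) / yarnProcessors));
    }

    public static void main(String[] args) {
      System.out.println(cpuRate(1, 8f)); // 1 vcore on 8 usable cores -> 1250
      System.out.println(cpuRate(8, 8f)); // requesting all cores caps at 10000
    }
  }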


[09/49] hadoop git commit: YARN-3275. CapacityScheduler: Preemption happening on non-preemptable queues. Contributed by Eric Payne

Posted by zj...@apache.org.
YARN-3275. CapacityScheduler: Preemption happening on non-preemptable queues. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27e8ea82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27e8ea82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27e8ea82

Branch: refs/heads/YARN-2928
Commit: 27e8ea820fab8dce59f4db9814e73bd60c1d4ef1
Parents: c797103
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Mar 6 22:36:18 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Mar 6 22:37:26 2015 +0000

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../hadoop/yarn/util/resource/Resources.java    |  5 ++++
 .../ProportionalCapacityPreemptionPolicy.java   | 27 ++++++++++++++++----
 ...estProportionalCapacityPreemptionPolicy.java | 24 +++++++++++++++++
 4 files changed, 54 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27e8ea82/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c2aa2ef..250fc1c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -719,6 +719,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired
     (Zhijie Shen via xgong)
 
+    YARN-3275. CapacityScheduler: Preemption happening on non-preemptable
+    queues (Eric Payne via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27e8ea82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index a205bd1..bcb0421 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -260,4 +260,9 @@ public class Resources {
     return createResource(Math.min(lhs.getMemory(), rhs.getMemory()),
         Math.min(lhs.getVirtualCores(), rhs.getVirtualCores()));
   }
+  
+  public static Resource componentwiseMax(Resource lhs, Resource rhs) {
+    return createResource(Math.max(lhs.getMemory(), rhs.getMemory()),
+        Math.max(lhs.getVirtualCores(), rhs.getVirtualCores()));
+  }
 }
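
componentwiseMax takes the per-component maximum, which is what lets the preemption policy floor a possibly negative (maxCapacity - idealAssigned) delta at zero, dimension by dimension. A small usage sketch, assuming hadoop-yarn-api and a hadoop-yarn-common build containing this patch on the classpath:

  import org.apache.hadoop.yarn.api.records.Resource;
  import org.apache.hadoop.yarn.util.resource.Resources;

  // Sketch: the per-component max added above, used to clamp a delta at zero.
  public class ComponentwiseMaxSketch {
    public static void main(String[] args) {
      Resource delta = Resource.newInstance(-2048, 3); // e.g. a negative memory delta
      Resource zero = Resource.newInstance(0, 0);
      Resource clamped = Resources.componentwiseMax(delta, zero);
      // memory is floored to 0 while the positive vcore component is kept
      System.out.println(clamped.getMemory() + " / " + clamped.getVirtualCores());
    }
  }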

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27e8ea82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 738f527..87a2a00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -527,6 +527,17 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
     List<RMContainer> skippedAMContainerlist = new ArrayList<RMContainer>();
 
     for (TempQueue qT : queues) {
+      if (qT.preemptionDisabled && qT.leafQueue != null) {
+        if (LOG.isDebugEnabled()) {
+          if (Resources.greaterThan(rc, clusterResource,
+              qT.toBePreempted, Resource.newInstance(0, 0))) {
+            LOG.debug("Tried to preempt the following "
+                      + "resources from non-preemptable queue: "
+                      + qT.queueName + " - Resources: " + qT.toBePreempted);
+          }
+        }
+        continue;
+      }
       // we act only if we are violating balance by more than
       // maxIgnoredOverCapacity
       if (Resources.greaterThan(rc, clusterResource, qT.current,
@@ -734,6 +745,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
       float absUsed = root.getAbsoluteUsedCapacity();
       float absCap = root.getAbsoluteCapacity();
       float absMaxCap = root.getAbsoluteMaximumCapacity();
+      boolean preemptionDisabled = root.getPreemptionDisabled();
 
       Resource current = Resources.multiply(clusterResources, absUsed);
       Resource guaranteed = Resources.multiply(clusterResources, absCap);
@@ -747,8 +759,8 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
         LeafQueue l = (LeafQueue) root;
         Resource pending = l.getTotalResourcePending();
         ret = new TempQueue(queueName, current, pending, guaranteed,
-            maxCapacity);
-        if (root.getPreemptionDisabled()) {
+            maxCapacity, preemptionDisabled);
+        if (preemptionDisabled) {
           ret.untouchableExtra = extra;
         } else {
           ret.preemptableExtra = extra;
@@ -757,7 +769,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
       } else {
         Resource pending = Resource.newInstance(0, 0);
         ret = new TempQueue(root.getQueueName(), current, pending, guaranteed,
-            maxCapacity);
+            maxCapacity, false);
         Resource childrensPreemptable = Resource.newInstance(0, 0);
         for (CSQueue c : root.getChildQueues()) {
           TempQueue subq = cloneQueues(c, clusterResources);
@@ -816,9 +828,10 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
 
     final ArrayList<TempQueue> children;
     LeafQueue leafQueue;
+    boolean preemptionDisabled;
 
     TempQueue(String queueName, Resource current, Resource pending,
-        Resource guaranteed, Resource maxCapacity) {
+        Resource guaranteed, Resource maxCapacity, boolean preemptionDisabled) {
       this.queueName = queueName;
       this.current = current;
       this.pending = pending;
@@ -831,6 +844,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
       this.children = new ArrayList<TempQueue>();
       this.untouchableExtra = Resource.newInstance(0, 0);
       this.preemptableExtra = Resource.newInstance(0, 0);
+      this.preemptionDisabled = preemptionDisabled;
     }
 
     public void setLeafQueue(LeafQueue l){
@@ -862,10 +876,13 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
     // the unused ones
     Resource offer(Resource avail, ResourceCalculator rc,
         Resource clusterResource) {
+      Resource absMaxCapIdealAssignedDelta = Resources.componentwiseMax(
+                      Resources.subtract(maxCapacity, idealAssigned),
+                      Resource.newInstance(0, 0));
       // remain = avail - min(avail, (max - assigned), (current + pending - assigned))
       Resource accepted = 
           Resources.min(rc, clusterResource, 
-              Resources.subtract(maxCapacity, idealAssigned),
+              absMaxCapIdealAssignedDelta,
           Resources.min(rc, clusterResource, avail, Resources.subtract(
               Resources.add(current, pending), idealAssigned)));
       Resource remain = Resources.subtract(avail, accepted);
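
The clamp matters for a queue that is already over its absolute max capacity: there (maxCapacity - idealAssigned) is negative, and feeding that negative bound into the min(...) chain could yield a negative accepted amount and an inflated remainder. A worked sketch of the memory component with assumed numbers loosely mirroring queueE in the new test (absMaxCap 109, used 110):

  // Sketch of offer()'s memory arithmetic before and after the clamp.
  public class OfferClampSketch {
    public static void main(String[] args) {
      int avail = 40, maxCapacity = 109, idealAssigned = 110;
      int currentPlusPendingMinusAssigned = 20;

      // Old form: min(max - assigned, min(avail, current + pending - assigned))
      int unclamped = Math.min(maxCapacity - idealAssigned,          // -1
          Math.min(avail, currentPlusPendingMinusAssigned));         // -> -1
      // New form: the negative delta is floored at 0 first.
      int clamped = Math.min(Math.max(maxCapacity - idealAssigned, 0),
          Math.min(avail, currentPlusPendingMinusAssigned));         // -> 0

      System.out.println("accepted without clamp = " + unclamped);           // -1
      System.out.println("accepted with clamp    = " + clamped);             // 0
      System.out.println("remain without clamp   = " + (avail - unclamped)); // 41 (> avail)
      System.out.println("remain with clamp      = " + (avail - clamped));   // 40
    }
  }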

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27e8ea82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 696b9bb..8f5237e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -532,6 +532,30 @@ public class TestProportionalCapacityPreemptionPolicy {
   }
 
   @Test
+  public void testPerQueueDisablePreemptionOverAbsMaxCapacity() {
+    int[][] qData = new int[][] {
+        //  /    A              D
+        //            B    C         E    F
+        {1000, 725, 360, 365, 275,  17, 258 },  // absCap
+        {1000,1000,1000,1000, 550, 109,1000 },  // absMaxCap
+        {1000, 741, 396, 345, 259, 110, 149 },  // used
+        {  40,  20,   0,  20,  20,  20,   0 },  // pending
+        {   0,   0,   0,   0,   0,   0,   0 },  // reserved
+        //          appA appB     appC appD
+        {   4,   2,   1,   1,   2,   1,   1 },  // apps
+        {  -1,  -1,   1,   1,  -1,   1,   1 },  // req granulrity
+        {   2,   2,   0,   0,   2,   0,   0 },  // subqueues
+    };
+    // QueueE inherits non-preemption from QueueD
+    schedConf.setPreemptionDisabled("root.queueD", true);
+    ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
+    policy.editSchedule();
+    // appC is running on QueueE. QueueE is over absMaxCap, but is not
+    // preemptable. Therefore, appC resources should not be preempted.
+    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC)));
+  }
+
+  @Test
   public void testOverCapacityImbalance() {
     int[][] qData = new int[][]{
       //  /   A   B   C


[26/49] hadoop git commit: YARN-3300. Outstanding_resource_requests table should not be shown in AHS. Contributed by Xuan Gong

Posted by zj...@apache.org.
YARN-3300. Outstanding_resource_requests table should not be shown in AHS. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3003eba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3003eba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3003eba

Branch: refs/heads/YARN-2928
Commit: c3003eba6f9802f15699564a5eb7c6e34424cb14
Parents: 82db334
Author: Jian He <ji...@apache.org>
Authored: Mon Mar 9 17:49:08 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Mar 9 20:46:48 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../webapp/AppAttemptPage.java                  |  2 +
 .../yarn/server/webapp/AppAttemptBlock.java     | 47 ++++++++++++++++++++
 .../hadoop/yarn/server/webapp/AppBlock.java     | 42 +----------------
 .../resourcemanager/webapp/AppAttemptPage.java  |  2 +
 5 files changed, 56 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1894552..a6dcb29 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -740,6 +740,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3287. Made TimelineClient put methods do as the correct login context.
     (Daryn Sharp and Jonathan Eagles via zjshen)
 
+    YARN-3300. Outstanding_resource_requests table should not be shown in AHS.
+    (Xuan Gong via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
index 1e0a342..540f6e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -42,6 +42,8 @@ public class AppAttemptPage extends AHSView {
     set(DATATABLES_ID, "containers");
     set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
     setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
+
+    set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.APP_HISTORY_WEB_UI);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index ea33f4f..1bba4d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -19,6 +19,11 @@ package org.apache.hadoop.yarn.server.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ATTEMPT_ID;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
@@ -38,7 +43,9 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
@@ -59,6 +66,7 @@ public class AppAttemptBlock extends HtmlBlock {
 
   @Override
   protected void render(Block html) {
+    String webUiType = $(WEB_UI_TYPE);
     String attemptid = $(APPLICATION_ATTEMPT_ID);
     if (attemptid.isEmpty()) {
       puts("Bad request: requires application attempt ID");
@@ -213,6 +221,45 @@ public class AppAttemptBlock extends HtmlBlock {
       ._("var containersTableData=" + containersTableData)._();
 
     tbody._()._();
+
+    if (webUiType.equals(YarnWebParams.RM_WEB_UI)) {
+      createContainerLocalityTable(html); // TODO:YARN-3284
+    }
+  }
+
+  //TODO: YARN-3284
+  //The containerLocality metrics will be exposed from AttemptReport
+  private void createContainerLocalityTable(Block html) {
+    int totalAllocatedContainers = 0; //TODO: YARN-3284
+    int[][] localityStatistics = new int[0][0];//TODO:YARN-3284
+    DIV<Hamlet> div = html.div(_INFO_WRAP);
+    TABLE<DIV<Hamlet>> table =
+        div.h3(
+          "Total Allocated Containers: "
+              + totalAllocatedContainers).h3("Each table cell"
+            + " represents the number of NodeLocal/RackLocal/OffSwitch containers"
+            + " satisfied by NodeLocal/RackLocal/OffSwitch resource requests.").table(
+          "#containerLocality");
+    table.
+      tr().
+        th(_TH, "").
+        th(_TH, "Node Local Request").
+        th(_TH, "Rack Local Request").
+        th(_TH, "Off Switch Request").
+      _();
+
+    String[] containersType =
+        { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
+            "Num Off Switch Containers (satisfied by)" };
+    boolean odd = false;
+    for (int i = 0; i < localityStatistics.length; i++) {
+      table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
+        .td(String.valueOf(localityStatistics[i][0]))
+        .td(i == 0 ? "" : String.valueOf(localityStatistics[i][1]))
+        .td(i <= 1 ? "" : String.valueOf(localityStatistics[i][2]))._();
+    }
+    table._();
+    div._();
   }
 
   private boolean hasAMContainer(ContainerId containerId,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 2db88ae..d4c20d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -21,11 +21,7 @@ package org.apache.hadoop.yarn.server.webapp;
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
-
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.List;
@@ -327,43 +323,9 @@ public class AppBlock extends HtmlBlock {
 
     tbody._()._();
 
-    createContainerLocalityTable(html); //TODO:YARN-3284
-    createResourceRequestsTable(html, null); //TODO:YARN-3284
-  }
-
-  //TODO: YARN-3284
-  //The containerLocality metrics will be exposed from AttemptReport
-  private void createContainerLocalityTable(Block html) {
-    int totalAllocatedContainers = 0; //TODO: YARN-3284
-    int[][] localityStatistics = new int[0][0];//TODO:YARN-3284
-    DIV<Hamlet> div = html.div(_INFO_WRAP);
-    TABLE<DIV<Hamlet>> table =
-        div.h3(
-          "Total Allocated Containers: "
-              + totalAllocatedContainers).h3("Each table cell"
-            + " represents the number of NodeLocal/RackLocal/OffSwitch containers"
-            + " satisfied by NodeLocal/RackLocal/OffSwitch resource requests.").table(
-          "#containerLocality");
-    table.
-      tr().
-        th(_TH, "").
-        th(_TH, "Node Local Request").
-        th(_TH, "Rack Local Request").
-        th(_TH, "Off Switch Request").
-      _();
-
-    String[] containersType =
-        { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
-            "Num Off Switch Containers (satisfied by)" };
-    boolean odd = false;
-    for (int i = 0; i < localityStatistics.length; i++) {
-      table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
-        .td(String.valueOf(localityStatistics[i][0]))
-        .td(i == 0 ? "" : String.valueOf(localityStatistics[i][1]))
-        .td(i <= 1 ? "" : String.valueOf(localityStatistics[i][2]))._();
+    if (webUiType != null && webUiType.equals(YarnWebParams.RM_WEB_UI)) {
+      createResourceRequestsTable(html, null); // TODO:YARN-3284
     }
-    table._();
-    div._();
   }
 
   //TODO:YARN-3284

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3003eba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
index 92eae48..6e4cfad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
@@ -45,6 +45,8 @@ public class AppAttemptPage extends RmView {
     set(DATATABLES_ID, "containers");
     set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
     setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
+
+    set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.RM_WEB_UI);
   }
 
   @Override
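
The pattern in this change is that each page class stamps WEB_UI_TYPE into the request context, and the shared AppAttemptBlock/AppBlock render RM-only tables only when that value names the RM web UI. A minimal standalone sketch of the gate, with plain strings standing in for the YarnWebParams constants and the HtmlBlock plumbing omitted:

  // Sketch: RM-specific tables are rendered only for the RM web UI.
  // The string values are placeholders, not the real YarnWebParams constants.
  public class WebUiTypeGateSketch {
    static final String RM_WEB_UI = "rm-ui";            // placeholder value
    static final String APP_HISTORY_WEB_UI = "ahs-ui";  // placeholder value

    static boolean showRmOnlyTables(String webUiType) {
      // Null-safe variant of the check: AHS pages never get the RM-only tables.
      return RM_WEB_UI.equals(webUiType);
    }

    public static void main(String[] args) {
      System.out.println(showRmOnlyTables(RM_WEB_UI));          // true
      System.out.println(showRmOnlyTables(APP_HISTORY_WEB_UI)); // false
      System.out.println(showRmOnlyTables(null));               // false
    }
  }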


[14/49] hadoop git commit: HDFS-7411. Change decommission logic to throttle by blocks rather than nodes in each interval. Contributed by Andrew Wang

Posted by zj...@apache.org.
HDFS-7411. Change decommission logic to throttle by blocks rather
than nodes in each interval. Contributed by Andrew Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ee0d32b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ee0d32b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ee0d32b

Branch: refs/heads/YARN-2928
Commit: 6ee0d32b98bc3aa5ed42859f1325d5a14fd1722a
Parents: 7ce3c76
Author: Chris Douglas <cd...@apache.org>
Authored: Sun Mar 8 18:31:04 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Sun Mar 8 18:31:04 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../apache/hadoop/hdfs/HdfsConfiguration.java   |   2 +-
 .../server/blockmanagement/BlockManager.java    | 121 +---
 .../server/blockmanagement/DatanodeManager.java | 109 +---
 .../blockmanagement/DecommissionManager.java    | 619 +++++++++++++++++--
 .../src/main/resources/hdfs-default.xml         |  23 +-
 .../apache/hadoop/hdfs/TestDecommission.java    | 412 ++++++++----
 .../blockmanagement/BlockManagerTestUtil.java   |   8 +-
 .../TestReplicationPolicyConsiderLoad.java      |   2 +-
 .../namenode/TestDecommissioningStatus.java     |  59 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   2 +-
 .../namenode/TestNamenodeCapacityReport.java    |   4 +-
 13 files changed, 996 insertions(+), 374 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 29717e1..3cd6372 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -719,6 +719,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7855. Separate class Packet from DFSOutputStream. (Li Bo bia jing9)
 
+    HDFS-7411. Change decommission logic to throttle by blocks rather than
+    nodes in each interval. (Andrew Wang via cdouglas)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 975f023..1e864bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -453,8 +453,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS_DEFAULT = 30000L;
   public static final String  DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY = "dfs.namenode.decommission.interval";
   public static final int     DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT = 30;
-  public static final String  DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY = "dfs.namenode.decommission.nodes.per.interval";
-  public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
+  public static final String  DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY = "dfs.namenode.decommission.blocks.per.interval";
+  public static final int     DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT = 500000;
+  public static final String  DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES = "dfs.namenode.decommission.max.concurrent.tracked.nodes";
+  public static final int     DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT = 100;
   public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
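
The renamed keys change the decommission throttle from a node count to a block count per interval and add a cap on concurrently tracked nodes. A small sketch of setting them programmatically with the shipped defaults (the same key names can go in hdfs-site.xml); requires hadoop-common and a hadoop-hdfs build containing this patch:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;

  // Sketch: exercising the new decommission throttle keys with their defaults.
  public class DecommissionConfSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // How often the monitor runs (seconds) and how many blocks it scans per run.
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 30);
      conf.setInt(
          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, 500000);
      // Upper bound on decommissioning nodes tracked at once; extras queue up.
      conf.setInt(
          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES, 100);
      System.out.println(conf.get(
          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY));
    }
  }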

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 8f2966a..29a2667 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -139,7 +139,7 @@ public class HdfsConfiguration extends Configuration {
       new DeprecationDelta("dfs.federation.nameservice.id",
         DFSConfigKeys.DFS_NAMESERVICE_ID),
       new DeprecationDelta("dfs.client.file-block-storage-locations.timeout",
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS)
+        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
     });
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 58a8b94..c1a3e05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3188,28 +3188,6 @@ public class BlockManager {
     }
     return live;
   }
-
-  private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
-      NumberReplicas num) {
-    int curReplicas = num.liveReplicas();
-    int curExpectedReplicas = getReplication(block);
-    BlockCollection bc = blocksMap.getBlockCollection(block);
-    StringBuilder nodeList = new StringBuilder();
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
-      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      nodeList.append(node);
-      nodeList.append(" ");
-    }
-    LOG.info("Block: " + block + ", Expected Replicas: "
-        + curExpectedReplicas + ", live replicas: " + curReplicas
-        + ", corrupt replicas: " + num.corruptReplicas()
-        + ", decommissioned replicas: " + num.decommissionedReplicas()
-        + ", excess replicas: " + num.excessReplicas()
-        + ", Is Open File: " + bc.isUnderConstruction()
-        + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
-        + srcNode + ", Is current datanode decommissioning: "
-        + srcNode.isDecommissionInProgress());
-  }
   
   /**
    * On stopping decommission, check if the node has excess replicas.
@@ -3240,89 +3218,30 @@ public class BlockManager {
   }
 
   /**
-   * Return true if there are any blocks on this node that have not
-   * yet reached their replication factor. Otherwise returns false.
+   * Returns whether a node can be safely decommissioned based on its 
+   * liveness. Dead nodes cannot always be safely decommissioned.
    */
-  boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
-    boolean status = false;
-    boolean firstReplicationLog = true;
-    int underReplicatedBlocks = 0;
-    int decommissionOnlyReplicas = 0;
-    int underReplicatedInOpenFiles = 0;
-    final Iterator<? extends Block> it = srcNode.getBlockIterator();
-    while(it.hasNext()) {
-      final Block block = it.next();
-      BlockCollection bc = blocksMap.getBlockCollection(block);
-
-      if (bc != null) {
-        NumberReplicas num = countNodes(block);
-        int curReplicas = num.liveReplicas();
-        int curExpectedReplicas = getReplication(block);
-                
-        if (isNeededReplication(block, curExpectedReplicas, curReplicas)) {
-          if (curExpectedReplicas > curReplicas) {
-            if (bc.isUnderConstruction()) {
-              if (block.equals(bc.getLastBlock()) && curReplicas > minReplication) {
-                continue;
-              }
-              underReplicatedInOpenFiles++;
-            }
-            
-            // Log info about one block for this node which needs replication
-            if (!status) {
-              status = true;
-              if (firstReplicationLog) {
-                logBlockReplicationInfo(block, srcNode, num);
-              }
-              // Allowing decommission as long as default replication is met
-              if (curReplicas >= defaultReplication) {
-                status = false;
-                firstReplicationLog = false;
-              }
-            }
-            underReplicatedBlocks++;
-            if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
-              decommissionOnlyReplicas++;
-            }
-          }
-          if (!neededReplications.contains(block) &&
-            pendingReplications.getNumReplicas(block) == 0 &&
-            namesystem.isPopulatingReplQueues()) {
-            //
-            // These blocks have been reported from the datanode
-            // after the startDecommission method has been executed. These
-            // blocks were in flight when the decommissioning was started.
-            // Process these blocks only when active NN is out of safe mode.
-            //
-            neededReplications.add(block,
-                                   curReplicas,
-                                   num.decommissionedReplicas(),
-                                   curExpectedReplicas);
-          }
-        }
-      }
+  boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
+    if (node.isAlive) {
+      return true;
     }
 
-    if (!status && !srcNode.isAlive) {
-      updateState();
-      if (pendingReplicationBlocksCount == 0 &&
-          underReplicatedBlocksCount == 0) {
-        LOG.info("srcNode {} is dead and there are no under-replicated" +
-            " blocks or blocks pending replication. Marking as " +
-            "decommissioned.");
-      } else {
-        LOG.warn("srcNode " + srcNode + " is dead " +
-            "while decommission is in progress. Continuing to mark " +
-            "it as decommission in progress so when it rejoins the " +
-            "cluster it can continue the decommission process.");
-        status = true;
-      }
+    updateState();
+    if (pendingReplicationBlocksCount == 0 &&
+        underReplicatedBlocksCount == 0) {
+      LOG.info("Node {} is dead and there are no under-replicated" +
+          " blocks or blocks pending replication. Safe to decommission.", 
+          node);
+      return true;
     }
 
-    srcNode.decommissioningStatus.set(underReplicatedBlocks,
-        decommissionOnlyReplicas, 
-        underReplicatedInOpenFiles);
-    return status;
+    LOG.warn("Node {} is dead " +
+        "while decommission is in progress. Cannot be safely " +
+        "decommissioned since there is risk of reduced " +
+        "data durability or data loss. Either restart the failed node or" +
+        " force decommissioning by removing, calling refreshNodes, " +
+        "then re-adding to the excludes files.", node);
+    return false;
   }
 
   public int getActiveBlockCount() {
@@ -3493,7 +3412,7 @@ public class BlockManager {
    * A block needs replication if the number of replicas is less than expected
    * or if it does not have enough racks.
    */
-  private boolean isNeededReplication(Block b, int expected, int current) {
+  boolean isNeededReplication(Block b, int expected, int current) {
     return current < expected || !blockHasEnoughRacks(b);
   }
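
The replacement check above reduces the old per-node block scan to a liveness decision: a live node can always continue decommissioning, while a dead node is only finished off when nothing is pending or under-replicated cluster-wide. A condensed restatement of that decision, with the two counters passed in rather than read from BlockManager state:

  // Sketch: condensed form of isNodeHealthyForDecommission(); the counters are
  // parameters here instead of the fields refreshed by updateState().
  public class DecomHealthSketch {
    static boolean healthyForDecommission(boolean nodeAlive,
        long pendingReplicationBlocks, long underReplicatedBlocks) {
      if (nodeAlive) {
        return true;                       // live nodes can always make progress
      }
      // Dead node: only safe if no replication work remains anywhere.
      return pendingReplicationBlocks == 0 && underReplicatedBlocks == 0;
    }

    public static void main(String[] args) {
      System.out.println(healthyForDecommission(true, 5, 12));  // true
      System.out.println(healthyForDecommission(false, 0, 0));  // true
      System.out.println(healthyForDecommission(false, 0, 7));  // false
    }
  }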
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 45c56a8..9179ff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
-import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 
@@ -53,8 +52,6 @@ import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.*;
 
-import static org.apache.hadoop.util.Time.now;
-
 /**
  * Manage datanodes, include decommission and other activities.
  */
@@ -65,9 +62,9 @@ public class DatanodeManager {
 
   private final Namesystem namesystem;
   private final BlockManager blockManager;
+  private final DecommissionManager decomManager;
   private final HeartbeatManager heartbeatManager;
   private final FSClusterStats fsClusterStats;
-  private Daemon decommissionthread = null;
 
   /**
    * Stores the datanode -> block map.  
@@ -110,7 +107,7 @@ public class DatanodeManager {
   private final HostFileManager hostFileManager = new HostFileManager();
 
   /** The period to wait for datanode heartbeat.*/
-  private final long heartbeatExpireInterval;
+  private long heartbeatExpireInterval;
   /** Ask Datanode only up to this many blocks to delete. */
   final int blockInvalidateLimit;
 
@@ -184,6 +181,8 @@ public class DatanodeManager {
     networktopology = NetworkTopology.getInstance(conf);
 
     this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
+    this.decomManager = new DecommissionManager(namesystem, blockManager,
+        heartbeatManager);
     this.fsClusterStats = newFSClusterStats();
 
     this.defaultXferPort = NetUtils.createSocketAddr(
@@ -307,25 +306,12 @@ public class DatanodeManager {
   }
   
   void activate(final Configuration conf) {
-    final DecommissionManager dm = new DecommissionManager(namesystem, blockManager);
-    this.decommissionthread = new Daemon(dm.new Monitor(
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 
-                    DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT),
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY, 
-                    DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT)));
-    decommissionthread.start();
-
+    decomManager.activate(conf);
     heartbeatManager.activate(conf);
   }
 
   void close() {
-    if (decommissionthread != null) {
-      decommissionthread.interrupt();
-      try {
-        decommissionthread.join(3000);
-      } catch (InterruptedException e) {
-      }
-    }
+    decomManager.close();
     heartbeatManager.close();
   }
 
@@ -340,6 +326,20 @@ public class DatanodeManager {
   }
 
   @VisibleForTesting
+  public DecommissionManager getDecomManager() {
+    return decomManager;
+  }
+
+  HostFileManager getHostFileManager() {
+    return hostFileManager;
+  }
+
+  @VisibleForTesting
+  public void setHeartbeatExpireInterval(long expiryMs) {
+    this.heartbeatExpireInterval = expiryMs;
+  }
+
+  @VisibleForTesting
   public FSClusterStats getFSClusterStats() {
     return fsClusterStats;
   }
@@ -826,63 +826,14 @@ public class DatanodeManager {
   }
 
   /**
-   * Decommission the node if it is in exclude list.
+   * Decommission the node if it is in the host exclude list.
+   *
+   * @param nodeReg datanode
    */
-  private void checkDecommissioning(DatanodeDescriptor nodeReg) { 
+  void startDecommissioningIfExcluded(DatanodeDescriptor nodeReg) {
     // If the registered node is in exclude list, then decommission it
-    if (hostFileManager.isExcluded(nodeReg)) {
-      startDecommission(nodeReg);
-    }
-  }
-
-  /**
-   * Change, if appropriate, the admin state of a datanode to 
-   * decommission completed. Return true if decommission is complete.
-   */
-  boolean checkDecommissionState(DatanodeDescriptor node) {
-    // Check to see if all blocks in this decommissioned
-    // node has reached their target replication factor.
-    if (node.isDecommissionInProgress() && node.checkBlockReportReceived()) {
-      if (!blockManager.isReplicationInProgress(node)) {
-        node.setDecommissioned();
-        LOG.info("Decommission complete for " + node);
-      }
-    }
-    return node.isDecommissioned();
-  }
-
-  /** Start decommissioning the specified datanode. */
-  @InterfaceAudience.Private
-  @VisibleForTesting
-  public void startDecommission(DatanodeDescriptor node) {
-    if (!node.isDecommissionInProgress()) {
-      if (!node.isAlive) {
-        LOG.info("Dead node " + node + " is decommissioned immediately.");
-        node.setDecommissioned();
-      } else if (!node.isDecommissioned()) {
-        for (DatanodeStorageInfo storage : node.getStorageInfos()) {
-          LOG.info("Start Decommissioning " + node + " " + storage
-              + " with " + storage.numBlocks() + " blocks");
-        }
-        heartbeatManager.startDecommission(node);
-        node.decommissioningStatus.setStartTime(now());
-
-        // all the blocks that reside on this node have to be replicated.
-        checkDecommissionState(node);
-      }
-    }
-  }
-
-  /** Stop decommissioning the specified datanodes. */
-  void stopDecommission(DatanodeDescriptor node) {
-    if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      LOG.info("Stop Decommissioning " + node);
-      heartbeatManager.stopDecommission(node);
-      // Over-replicated blocks will be detected and processed when 
-      // the dead node comes back and send in its full block report.
-      if (node.isAlive) {
-        blockManager.processOverReplicatedBlocksOnReCommission(node);
-      }
+    if (getHostFileManager().isExcluded(nodeReg)) {
+      decomManager.startDecommission(nodeReg);
     }
   }
 
@@ -993,7 +944,7 @@ public class DatanodeManager {
           // also treat the registration message as a heartbeat
           heartbeatManager.register(nodeS);
           incrementVersionCount(nodeS.getSoftwareVersion());
-          checkDecommissioning(nodeS);
+          startDecommissioningIfExcluded(nodeS);
           success = true;
         } finally {
           if (!success) {
@@ -1029,7 +980,7 @@ public class DatanodeManager {
         // because its is done when the descriptor is created
         heartbeatManager.addDatanode(nodeDescr);
         incrementVersionCount(nodeReg.getSoftwareVersion());
-        checkDecommissioning(nodeDescr);
+        startDecommissioningIfExcluded(nodeDescr);
         success = true;
       } finally {
         if (!success) {
@@ -1092,9 +1043,9 @@ public class DatanodeManager {
         node.setDisallowed(true); // case 2.
       } else {
         if (hostFileManager.isExcluded(node)) {
-          startDecommission(node); // case 3.
+          decomManager.startDecommission(node); // case 3.
         } else {
-          stopDecommission(node); // case 4.
+          decomManager.stopDecommission(node); // case 4.
         }
       }
     }
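
The DecommissionManager rewrite below tracks decommissioning nodes in a bounded structure (decomNodeBlocks) and parks the overflow in pendingNodes, keeping NameNode memory use bounded. A minimal standalone sketch of that admission pattern, promoting eagerly rather than on the monitor tick and using strings in place of DatanodeDescriptor:

  import java.util.ArrayDeque;
  import java.util.LinkedHashSet;
  import java.util.Queue;
  import java.util.Set;

  // Sketch: bounded "tracked" set plus overflow queue, as described in the
  // DecommissionManager javadoc below. A hypothetical limit of 2 is used.
  public class BoundedTrackingSketch {
    private final int maxTracked;
    private final Set<String> tracked = new LinkedHashSet<>();
    private final Queue<String> pending = new ArrayDeque<>();

    BoundedTrackingSketch(int maxTracked) { this.maxTracked = maxTracked; }

    void startDecommission(String node) {
      pending.add(node);                  // everything starts in the queue
      admitFromPending();
    }

    void finishDecommission(String node) {
      tracked.remove(node);               // slot freed on completion
      admitFromPending();
    }

    private void admitFromPending() {
      while (!pending.isEmpty() && tracked.size() < maxTracked) {
        tracked.add(pending.poll());      // promote while capacity remains
      }
    }

    public static void main(String[] args) {
      BoundedTrackingSketch m = new BoundedTrackingSketch(2);
      m.startDecommission("dn1");
      m.startDecommission("dn2");
      m.startDecommission("dn3");         // waits: only 2 tracked at once
      System.out.println(m.tracked + " pending=" + m.pending);
      m.finishDecommission("dn1");        // dn3 is promoted
      System.out.println(m.tracked + " pending=" + m.pending);
    }
  }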

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index a234cf5..71c88f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -17,88 +17,605 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.AbstractList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
+import java.util.Queue;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.util.CyclicIteration;
+import org.apache.hadoop.util.ChunkedArrayList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.hadoop.util.Time.now;
 
 /**
- * Manage node decommissioning.
+ * Manages datanode decommissioning. A background monitor thread 
+ * periodically checks the status of datanodes that are in-progress of 
+ * decommissioning.
+ * <p/>
+ * A datanode can be decommissioned in a few situations:
+ * <ul>
+ * <li>If a DN is dead, it is decommissioned immediately.</li>
+ * <li>If a DN is alive, it is decommissioned after all of its blocks 
+ * are sufficiently replicated. Merely under-replicated blocks do not 
+ * block decommissioning as long as they are above a replication 
+ * threshold.</li>
+ * </ul>
+ * In the second case, the datanode transitions to a 
+ * decommission-in-progress state and is tracked by the monitor thread. The 
+ * monitor periodically scans through the list of insufficiently replicated
+ * blocks on these datanodes to 
+ * determine if they can be decommissioned. The monitor also prunes this list 
+ * as blocks become replicated, so monitor scans will become more efficient 
+ * over time.
+ * <p/>
+ * Decommission-in-progress nodes that become dead do not progress to 
+ * decommissioned until they become live again. This prevents potential 
+ * durability loss for singly-replicated blocks (see HDFS-6791).
+ * <p/>
+ * This class depends on the FSNamesystem lock for synchronization.
  */
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
-class DecommissionManager {
-  static final Log LOG = LogFactory.getLog(DecommissionManager.class);
+public class DecommissionManager {
+  private static final Logger LOG = LoggerFactory.getLogger(DecommissionManager
+      .class);
 
   private final Namesystem namesystem;
-  private final BlockManager blockmanager;
+  private final BlockManager blockManager;
+  private final HeartbeatManager hbManager;
+  private final ScheduledExecutorService executor;
+
+  /**
+   * Map containing the decommission-in-progress datanodes that are being
+   * tracked so they can be be marked as decommissioned.
+   * <p/>
+   * This holds a set of references to the under-replicated blocks on the DN at
+   * the time the DN is added to the map, i.e. the blocks that are preventing
+   * the node from being marked as decommissioned. During a monitor tick, this
+   * list is pruned as blocks becomes replicated.
+   * <p/>
+   * Note also that the reference to the list of under-replicated blocks 
+   * will be null on initial add
+   * <p/>
+   * However, this map can become out-of-date since it is not updated by block
+   * reports or other events. Before being finally marking as decommissioned,
+   * another check is done with the actual block map.
+   */
+  private final TreeMap<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
+      decomNodeBlocks;
+
+  /**
+   * Tracking a node in decomNodeBlocks consumes additional memory. To limit
+   * the impact on NN memory consumption, we limit the number of nodes in 
+   * decomNodeBlocks. Additional nodes wait in pendingNodes.
+   */
+  private final Queue<DatanodeDescriptor> pendingNodes;
+
+  private Monitor monitor = null;
 
   DecommissionManager(final Namesystem namesystem,
-      final BlockManager blockmanager) {
+      final BlockManager blockManager, final HeartbeatManager hbManager) {
     this.namesystem = namesystem;
-    this.blockmanager = blockmanager;
+    this.blockManager = blockManager;
+    this.hbManager = hbManager;
+
+    executor = Executors.newScheduledThreadPool(1,
+        new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d")
+            .setDaemon(true).build());
+    decomNodeBlocks = new TreeMap<>();
+    pendingNodes = new LinkedList<>();
+  }
+
+  /**
+   * Start the decommission monitor thread.
+   * @param conf
+   */
+  void activate(Configuration conf) {
+    final int intervalSecs =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT);
+    checkArgument(intervalSecs >= 0, "Cannot set a negative " +
+        "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);
+
+    // By default, the new configuration key overrides the deprecated one.
+    // No # node limit is set.
+    int blocksPerInterval = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
+    int nodesPerInterval = Integer.MAX_VALUE;
+
+    // If the expected key isn't present and the deprecated one is, 
+    // use the deprecated one into the new one. This overrides the 
+    // default.
+    //
+    // Also print a deprecation warning.
+    final String deprecatedKey =
+        "dfs.namenode.decommission.nodes.per.interval";
+    final String strNodes = conf.get(deprecatedKey);
+    if (strNodes != null) {
+      nodesPerInterval = Integer.parseInt(strNodes);
+      blocksPerInterval = Integer.MAX_VALUE;
+      LOG.warn("Using deprecated configuration key {} value of {}.",
+          deprecatedKey, nodesPerInterval); 
+      LOG.warn("Please update your configuration to use {} instead.", 
+          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
+    }
+    checkArgument(blocksPerInterval > 0,
+        "Must set a positive value for "
+        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
+
+    final int maxConcurrentTrackedNodes = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
+    checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " +
+        "value for "
+        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
+
+    monitor = new Monitor(blocksPerInterval, 
+        nodesPerInterval, maxConcurrentTrackedNodes);
+    executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
+        TimeUnit.SECONDS);
+
+    LOG.debug("Activating DecommissionManager with interval {} seconds, " +
+            "{} max blocks per interval, {} max nodes per interval, " +
+            "{} max concurrently tracked nodes.", intervalSecs,
+        blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes);
+  }
+
+  /**
+   * Stop the decommission monitor thread, waiting briefly for it to terminate.
+   */
+  void close() {
+    executor.shutdownNow();
+    try {
+      executor.awaitTermination(3000, TimeUnit.MILLISECONDS);
+    } catch (InterruptedException e) {}
+  }
+
+  /**
+   * Start decommissioning the specified datanode. 
+   * @param node
+   */
+  @VisibleForTesting
+  public void startDecommission(DatanodeDescriptor node) {
+    if (!node.isDecommissionInProgress()) {
+      if (!node.isAlive) {
+        LOG.info("Dead node {} is decommissioned immediately.", node);
+        node.setDecommissioned();
+      } else if (!node.isDecommissioned()) {
+        for (DatanodeStorageInfo storage : node.getStorageInfos()) {
+          LOG.info("Starting decommission of {} {} with {} blocks", 
+              node, storage, storage.numBlocks());
+        }
+        // Update DN stats maintained by HeartbeatManager
+        hbManager.startDecommission(node);
+        node.decommissioningStatus.setStartTime(now());
+        pendingNodes.add(node);
+      }
+    } else {
+      LOG.trace("startDecommission: Node {} is already decommission in "
+              + "progress, nothing to do.", node);
+    }
+  }
+
+  /**
+   * Stop decommissioning the specified datanode. 
+   * @param node
+   */
+  void stopDecommission(DatanodeDescriptor node) {
+    if (node.isDecommissionInProgress() || node.isDecommissioned()) {
+      LOG.info("Stopping decommissioning of node {}", node);
+      // Update DN stats maintained by HeartbeatManager
+      hbManager.stopDecommission(node);
+      // Over-replicated blocks will be detected and processed when 
+      // the dead node comes back and send in its full block report.
+      if (node.isAlive) {
+        blockManager.processOverReplicatedBlocksOnReCommission(node);
+      }
+      // Remove from tracking in DecommissionManager
+      pendingNodes.remove(node);
+      decomNodeBlocks.remove(node);
+    } else {
+      LOG.trace("stopDecommission: Node {} is not decommission in progress " +
+          "or decommissioned, nothing to do.", node);
+    }
+  }
+
+  private void setDecommissioned(DatanodeDescriptor dn) {
+    dn.setDecommissioned();
+    LOG.info("Decommissioning complete for node {}", dn);
   }
 
-  /** Periodically check decommission status. */
-  class Monitor implements Runnable {
-    /** recheckInterval is how often namenode checks
-     *  if a node has finished decommission
+  /**
+   * Checks whether a block is sufficiently replicated for decommissioning.
+   * Full-strength replication is not always necessary, hence "sufficient".
+   * @return true if sufficient, else false.
+   */
+  private boolean isSufficientlyReplicated(BlockInfoContiguous block, 
+      BlockCollection bc,
+      NumberReplicas numberReplicas) {
+    final int numExpected = bc.getBlockReplication();
+    final int numLive = numberReplicas.liveReplicas();
+    if (!blockManager.isNeededReplication(block, numExpected, numLive)) {
+      // Block doesn't need replication. Skip.
+      LOG.trace("Block {} does not need replication.", block);
+      return true;
+    }
+
+    // Block is under-replicated
+    LOG.trace("Block {} numExpected={}, numLive={}", block, numExpected, 
+        numLive);
+    if (numExpected > numLive) {
+      if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) {
+        // Can decom a UC block as long as there will still be minReplicas
+        if (numLive >= blockManager.minReplication) {
+          LOG.trace("UC block {} sufficiently-replicated since numLive ({}) "
+              + ">= minR ({})", block, numLive, blockManager.minReplication);
+          return true;
+        } else {
+          LOG.trace("UC block {} insufficiently-replicated since numLive "
+              + "({}) < minR ({})", block, numLive,
+              blockManager.minReplication);
+        }
+      } else {
+        // Can decom a non-UC as long as the default replication is met
+        if (numLive >= blockManager.defaultReplication) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  private static void logBlockReplicationInfo(Block block, BlockCollection bc,
+      DatanodeDescriptor srcNode, NumberReplicas num,
+      Iterable<DatanodeStorageInfo> storages) {
+    int curReplicas = num.liveReplicas();
+    int curExpectedReplicas = bc.getBlockReplication();
+    StringBuilder nodeList = new StringBuilder();
+    for (DatanodeStorageInfo storage : storages) {
+      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
+      nodeList.append(node);
+      nodeList.append(" ");
+    }
+    LOG.info("Block: " + block + ", Expected Replicas: "
+        + curExpectedReplicas + ", live replicas: " + curReplicas
+        + ", corrupt replicas: " + num.corruptReplicas()
+        + ", decommissioned replicas: " + num.decommissionedReplicas()
+        + ", excess replicas: " + num.excessReplicas()
+        + ", Is Open File: " + bc.isUnderConstruction()
+        + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+        + srcNode + ", Is current datanode decommissioning: "
+        + srcNode.isDecommissionInProgress());
+  }
+
+  @VisibleForTesting
+  public int getNumPendingNodes() {
+    return pendingNodes.size();
+  }
+
+  @VisibleForTesting
+  public int getNumTrackedNodes() {
+    return decomNodeBlocks.size();
+  }
+
+  @VisibleForTesting
+  public int getNumNodesChecked() {
+    return monitor.numNodesChecked;
+  }
+
+  /**
+   * Checks to see if DNs have finished decommissioning.
+   * <p/>
+   * Since this is done while holding the namesystem lock, 
+   * the amount of work per monitor tick is limited.
+   */
+  private class Monitor implements Runnable {
+    /**
+     * The maximum number of blocks to check per tick.
+     */
+    private final int numBlocksPerCheck;
+    /**
+     * The maximum number of nodes to check per tick.
      */
-    private final long recheckInterval;
-    /** The number of decommission nodes to check for each interval */
     private final int numNodesPerCheck;
-    /** firstkey can be initialized to anything. */
-    private String firstkey = "";
+    /**
+     * The maximum number of nodes to track in decomNodeBlocks. A value of 0
+     * means no limit.
+     */
+    private final int maxConcurrentTrackedNodes;
+    /**
+     * The number of blocks that have been checked on this tick.
+     */
+    private int numBlocksChecked = 0;
+    /**
+     * The number of nodes that have been checked on this tick. Used for 
+     * testing.
+     */
+    private int numNodesChecked = 0;
+    /**
+     * The last datanode in decomNodeBlocks that we've processed.
+     */
+    private DatanodeDescriptor iterkey = new DatanodeDescriptor(new 
+        DatanodeID("", "", "", 0, 0, 0, 0));
 
-    Monitor(int recheckIntervalInSecond, int numNodesPerCheck) {
-      this.recheckInterval = recheckIntervalInSecond * 1000L;
+    Monitor(int numBlocksPerCheck, int numNodesPerCheck, int 
+        maxConcurrentTrackedNodes) {
+      this.numBlocksPerCheck = numBlocksPerCheck;
       this.numNodesPerCheck = numNodesPerCheck;
+      this.maxConcurrentTrackedNodes = maxConcurrentTrackedNodes;
+    }
+
+    private boolean exceededNumBlocksPerCheck() {
+      LOG.trace("Processed {} blocks so far this tick", numBlocksChecked);
+      return numBlocksChecked >= numBlocksPerCheck;
+    }
+
+    @Deprecated
+    private boolean exceededNumNodesPerCheck() {
+      LOG.trace("Processed {} nodes so far this tick", numNodesChecked);
+      return numNodesChecked >= numNodesPerCheck;
     }
 
-    /**
-     * Check decommission status of numNodesPerCheck nodes
-     * for every recheckInterval milliseconds.
-     */
     @Override
     public void run() {
-      for(; namesystem.isRunning(); ) {
-        namesystem.writeLock();
-        try {
-          check();
-        } finally {
-          namesystem.writeUnlock();
+      if (!namesystem.isRunning()) {
+        LOG.info("Namesystem is not running, skipping decommissioning checks"
+            + ".");
+        return;
+      }
+      // Reset the checked count at beginning of each iteration
+      numBlocksChecked = 0;
+      numNodesChecked = 0;
+      // Check decom progress
+      namesystem.writeLock();
+      try {
+        processPendingNodes();
+        check();
+      } finally {
+        namesystem.writeUnlock();
+      }
+      if (numBlocksChecked + numNodesChecked > 0) {
+        LOG.info("Checked {} blocks and {} nodes this tick", numBlocksChecked,
+            numNodesChecked);
+      }
+    }
+
+    /**
+     * Pop datanodes off the pending list and into decomNodeBlocks, 
+     * subject to the maxConcurrentTrackedNodes limit.
+     */
+    private void processPendingNodes() {
+      while (!pendingNodes.isEmpty() &&
+          (maxConcurrentTrackedNodes == 0 ||
+           decomNodeBlocks.size() < maxConcurrentTrackedNodes)) {
+        decomNodeBlocks.put(pendingNodes.poll(), null);
+      }
+    }
+
+    private void check() {
+      final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>>
+          it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
+      final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
+
+      while (it.hasNext()
+          && !exceededNumBlocksPerCheck()
+          && !exceededNumNodesPerCheck()) {
+        numNodesChecked++;
+        final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
+            entry = it.next();
+        final DatanodeDescriptor dn = entry.getKey();
+        AbstractList<BlockInfoContiguous> blocks = entry.getValue();
+        boolean fullScan = false;
+        if (blocks == null) {
+          // This is a newly added datanode, run through its list to schedule 
+          // under-replicated blocks for replication and collect the blocks 
+          // that are insufficiently replicated for further tracking
+          LOG.debug("Newly-added node {}, doing full scan to find " +
+              "insufficiently-replicated blocks.", dn);
+          blocks = handleInsufficientlyReplicated(dn);
+          decomNodeBlocks.put(dn, blocks);
+          fullScan = true;
+        } else {
+          // This is a known datanode, check if its # of insufficiently 
+          // replicated blocks has dropped to zero and if it can be decommed
+          LOG.debug("Processing decommission-in-progress node {}", dn);
+          pruneSufficientlyReplicated(dn, blocks);
         }
-  
-        try {
-          Thread.sleep(recheckInterval);
-        } catch (InterruptedException ie) {
-          LOG.warn(this.getClass().getSimpleName() + " interrupted: " + ie);
+        if (blocks.size() == 0) {
+          if (!fullScan) {
+            // If we didn't just do a full scan, need to re-check with the 
+            // full block map.
+            //
+            // We've replicated all the known insufficiently replicated 
+            // blocks. Re-check with the full block map before finally 
+            // marking the datanode as decommissioned 
+            LOG.debug("Node {} has finished replicating current set of "
+                + "blocks, checking with the full block map.", dn);
+            blocks = handleInsufficientlyReplicated(dn);
+            decomNodeBlocks.put(dn, blocks);
+          }
+          // If the full scan is clean AND the node liveness is okay, 
+          // we can finally mark as decommissioned.
+          final boolean isHealthy =
+              blockManager.isNodeHealthyForDecommission(dn);
+          if (blocks.size() == 0 && isHealthy) {
+            setDecommissioned(dn);
+            toRemove.add(dn);
+            LOG.debug("Node {} is sufficiently replicated and healthy, "
+                + "marked as decommissioned.", dn);
+          } else {
+            if (LOG.isDebugEnabled()) {
+              StringBuilder b = new StringBuilder("Node {} ");
+              if (isHealthy) {
+                b.append("is ");
+              } else {
+                b.append("isn't ");
+              }
+              b.append("healthy and still needs to replicate {} more blocks," +
+                  " decommissioning is still in progress.");
+              LOG.debug(b.toString(), dn, blocks.size());
+            }
+          }
+        } else {
+          LOG.debug("Node {} still has {} blocks to replicate "
+                  + "before it is a candidate to finish decommissioning.",
+              dn, blocks.size());
         }
+        iterkey = dn;
+      }
+      // Remove the datanodes that are decommissioned
+      for (DatanodeDescriptor dn : toRemove) {
+        Preconditions.checkState(dn.isDecommissioned(),
+            "Removing a node that is not yet decommissioned!");
+        decomNodeBlocks.remove(dn);
       }
     }
-    
-    private void check() {
-      final DatanodeManager dm = blockmanager.getDatanodeManager();
-      int count = 0;
-      for(Map.Entry<String, DatanodeDescriptor> entry
-          : dm.getDatanodeCyclicIteration(firstkey)) {
-        final DatanodeDescriptor d = entry.getValue();
-        firstkey = entry.getKey();
-
-        if (d.isDecommissionInProgress()) {
-          try {
-            dm.checkDecommissionState(d);
-          } catch(Exception e) {
-            LOG.warn("entry=" + entry, e);
+
+    /**
+     * Removes sufficiently replicated blocks from the block list of a 
+     * datanode.
+     */
+    private void pruneSufficientlyReplicated(final DatanodeDescriptor datanode,
+        AbstractList<BlockInfoContiguous> blocks) {
+      processBlocksForDecomInternal(datanode, blocks.iterator(), null, true);
+    }
+
+    /**
+     * Returns a list of blocks on a datanode that are insufficiently 
+     * replicated, i.e. are under-replicated enough to prevent decommission.
+     * <p/>
+     * As part of this, it also schedules replication work for 
+     * any under-replicated blocks.
+     *
+     * @param datanode the datanode whose block list is scanned
+     * @return List of insufficiently replicated blocks 
+     */
+    private AbstractList<BlockInfoContiguous> handleInsufficientlyReplicated(
+        final DatanodeDescriptor datanode) {
+      AbstractList<BlockInfoContiguous> insufficient = new ChunkedArrayList<>();
+      processBlocksForDecomInternal(datanode, datanode.getBlockIterator(),
+          insufficient, false);
+      return insufficient;
+    }
+
+    /**
+     * Used while checking if decommission-in-progress datanodes can be marked
+     * as decommissioned. Combines shared logic of 
+     * pruneSufficientlyReplicated and handleInsufficientlyReplicated.
+     *
+     * @param datanode                    Datanode
+     * @param it                          Iterator over the blocks on the
+     *                                    datanode
+     * @param insufficientlyReplicated    Return parameter. If it's not null,
+     *                                    will contain the insufficiently
+     *                                    replicated-blocks from the list.
+     * @param pruneSufficientlyReplicated whether to remove sufficiently
+     *                                    replicated blocks from the iterator
+     */
+    private void processBlocksForDecomInternal(
+        final DatanodeDescriptor datanode,
+        final Iterator<BlockInfoContiguous> it,
+        final List<BlockInfoContiguous> insufficientlyReplicated,
+        boolean pruneSufficientlyReplicated) {
+      boolean firstReplicationLog = true;
+      int underReplicatedBlocks = 0;
+      int decommissionOnlyReplicas = 0;
+      int underReplicatedInOpenFiles = 0;
+      while (it.hasNext()) {
+        numBlocksChecked++;
+        final BlockInfoContiguous block = it.next();
+        // Remove the block from the list if it's no longer in the block map,
+        // e.g. the containing file has been deleted
+        if (blockManager.blocksMap.getStoredBlock(block) == null) {
+          LOG.trace("Removing unknown block {}", block);
+          it.remove();
+          continue;
+        }
+        BlockCollection bc = blockManager.blocksMap.getBlockCollection(block);
+        if (bc == null) {
+          // Orphan block, will be invalidated eventually. Skip.
+          continue;
+        }
+
+        final NumberReplicas num = blockManager.countNodes(block);
+        final int liveReplicas = num.liveReplicas();
+        final int curReplicas = liveReplicas;
+
+        // Schedule under-replicated blocks for replication if not already
+        // pending
+        if (blockManager.isNeededReplication(block, bc.getBlockReplication(),
+            liveReplicas)) {
+          if (!blockManager.neededReplications.contains(block) &&
+              blockManager.pendingReplications.getNumReplicas(block) == 0 &&
+              namesystem.isPopulatingReplQueues()) {
+            // Process these blocks only when active NN is out of safe mode.
+            blockManager.neededReplications.add(block,
+                curReplicas,
+                num.decommissionedReplicas(),
+                bc.getBlockReplication());
           }
-          if (++count == numNodesPerCheck) {
-            return;
+        }
+
+        // Even if the block is under-replicated, it doesn't block
+        // decommission if it's sufficiently replicated for decommissioning
+        // purposes (see isSufficientlyReplicated)
+        if (isSufficientlyReplicated(block, bc, num)) {
+          if (pruneSufficientlyReplicated) {
+            it.remove();
           }
+          continue;
+        }
+
+        // We've found an insufficiently replicated block.
+        if (insufficientlyReplicated != null) {
+          insufficientlyReplicated.add(block);
+        }
+        // Log if this is our first time through
+        if (firstReplicationLog) {
+          logBlockReplicationInfo(block, bc, datanode, num,
+              blockManager.blocksMap.getStorages(block));
+          firstReplicationLog = false;
+        }
+        // Update various counts
+        underReplicatedBlocks++;
+        if (bc.isUnderConstruction()) {
+          underReplicatedInOpenFiles++;
+        }
+        if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
+          decommissionOnlyReplicas++;
         }
       }
+
+      datanode.decommissioningStatus.set(underReplicatedBlocks,
+          decommissionOnlyReplicas,
+          underReplicatedInOpenFiles);
     }
   }
+
+  @VisibleForTesting
+  void runMonitor() throws ExecutionException, InterruptedException {
+    Future f = executor.submit(monitor);
+    f.get();
+  }
 }
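
For readers following the new DecommissionManager flow above: startDecommission() queues a node on pendingNodes, the Monitor promotes queued nodes into decomNodeBlocks up to maxConcurrentTrackedNodes, and each tick checks at most numBlocksPerCheck blocks, dropping a node once its insufficiently-replicated list is empty. The standalone sketch below is only a rough illustration of that pending-queue / bounded-tracking / per-tick-budget pattern; the Node and block identifiers are hypothetical stand-ins (plain Strings and Longs), not the Hadoop DatanodeDescriptor or BlockInfoContiguous types, and the "sufficiently replicated" check is faked for brevity.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public class DecomMonitorSketch {
  private final Queue<String> pendingNodes = new ArrayDeque<>();
  // node -> blocks still considered insufficiently replicated (null = not yet scanned)
  private final Map<String, List<Long>> trackedNodes = new LinkedHashMap<>();
  private final int maxConcurrentTrackedNodes;  // 0 means no limit
  private final int numBlocksPerCheck;          // per-tick block budget

  public DecomMonitorSketch(int maxTracked, int blocksPerCheck) {
    this.maxConcurrentTrackedNodes = maxTracked;
    this.numBlocksPerCheck = blocksPerCheck;
  }

  public void startDecommission(String node) {
    pendingNodes.add(node);  // queued until a tracking slot frees up
  }

  /** One monitor tick: promote pending nodes, then scan within the block budget. */
  public void tick(Map<String, List<Long>> blocksOnNode) {
    int blocksChecked = 0;
    // Promote pending nodes while under the concurrent-tracking limit.
    while (!pendingNodes.isEmpty()
        && (maxConcurrentTrackedNodes == 0
            || trackedNodes.size() < maxConcurrentTrackedNodes)) {
      trackedNodes.put(pendingNodes.poll(), null);
    }
    // Scan tracked nodes until the per-tick block budget is spent.
    List<String> done = new ArrayList<>();
    Iterator<Map.Entry<String, List<Long>>> it =
        trackedNodes.entrySet().iterator();
    while (it.hasNext() && blocksChecked < numBlocksPerCheck) {
      Map.Entry<String, List<Long>> entry = it.next();
      List<Long> remaining = entry.getValue();
      if (remaining == null) {
        // First visit: "full scan" keeps only the blocks that still block decom.
        remaining = new ArrayList<>(
            blocksOnNode.getOrDefault(entry.getKey(), new ArrayList<Long>()));
        entry.setValue(remaining);
      }
      // Fake check: pretend each block examined becomes sufficiently replicated.
      Iterator<Long> blocks = remaining.iterator();
      while (blocks.hasNext() && blocksChecked < numBlocksPerCheck) {
        blocks.next();
        blocks.remove();
        blocksChecked++;
      }
      if (remaining.isEmpty()) {
        done.add(entry.getKey());  // candidate to mark as decommissioned
      }
    }
    for (String node : done) {
      trackedNodes.remove(node);
    }
  }

  public int getNumPendingNodes() { return pendingNodes.size(); }
  public int getNumTrackedNodes() { return trackedNodes.size(); }
}

The real Monitor additionally resumes iteration from the last processed datanode (iterkey plus CyclicIteration) so that large clusters are scanned fairly across ticks, and it re-runs a full scan plus a node-health check before finally marking a node decommissioned.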

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 7eacfc5..736c96a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -736,10 +736,25 @@
 </property>
 
 <property>
-  <name>dfs.namenode.decommission.nodes.per.interval</name>
-  <value>5</value>
-  <description>The number of nodes namenode checks if decommission is complete
-  in each dfs.namenode.decommission.interval.</description>
+  <name>dfs.namenode.decommission.blocks.per.interval</name>
+  <value>500000</value>
+  <description>The approximate number of blocks to process per 
+      decommission interval, as defined in dfs.namenode.decommission.interval.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.decommission.max.concurrent.tracked.nodes</name>
+  <value>100</value>
+  <description>
+    The maximum number of decommission-in-progress datanodes that will be
+    tracked at one time by the namenode. Tracking a decommission-in-progress
+    datanode consumes additional NN memory proportional to the number of blocks
+    on the datanode. Having a conservative limit reduces the potential impact
+    of decommissioning a large number of nodes at once.
+
+    A value of 0 means no limit will be enforced.
+  </description>
 </property>
 
 <property>
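
As a quick illustration of how these new keys (together with dfs.namenode.decommission.interval, which is specified in seconds) might be tuned programmatically, a sketch along the following lines could be used in a test or client-side Configuration. The DFSConfigKeys constants are the ones referenced in the tests later in this patch; the numeric values are arbitrary examples, not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DecomTuningExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Budget of blocks processed per decommission monitor tick.
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
        100000);
    // Cap on concurrently tracked decommission-in-progress datanodes
    // (0 means no limit is enforced).
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
        25);
    // Seconds between decommission monitor ticks.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 30);
    System.out.println("tracked-node limit = " + conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
        0));
  }
}

The same keys can of course be set in hdfs-site.xml using the property names shown in the diff above.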

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 35c0d8c..081e40f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -26,39 +27,56 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Random;
+import java.util.concurrent.ExecutionException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class tests the decommissioning of nodes.
  */
 public class TestDecommission {
-  public static final Log LOG = LogFactory.getLog(TestDecommission.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestDecommission
+      .class);
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   static final int fileSize = 16384;
@@ -90,6 +108,7 @@ public class TestDecommission {
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);
@@ -106,7 +125,7 @@ public class TestDecommission {
     }
   }
   
-  private void writeConfigFile(Path name, ArrayList<String> nodes) 
+  private void writeConfigFile(Path name, List<String> nodes) 
     throws IOException {
     // delete if it already exists
     if (localFileSys.exists(name)) {
@@ -150,7 +169,7 @@ public class TestDecommission {
    * @param downnode - if null, there is no decommissioned node for this file.
    * @return - null if no failure found, else an error message string.
    */
-  private String checkFile(FileSystem fileSys, Path name, int repl,
+  private static String checkFile(FileSystem fileSys, Path name, int repl,
     String downnode, int numDatanodes) throws IOException {
     boolean isNodeDown = (downnode != null);
     // need a raw stream
@@ -262,7 +281,7 @@ public class TestDecommission {
   /* Ask a specific NN to stop decommission of the datanode and wait for each
    * to reach the NORMAL state.
    */
-  private void recomissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
+  private void recommissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
     LOG.info("Recommissioning node: " + decommissionedNode);
     writeConfigFile(excludeFile, null);
     refreshNodes(cluster.getNamesystem(nnIndex), conf);
@@ -280,7 +299,7 @@ public class TestDecommission {
       LOG.info("Waiting for node " + node + " to change state to "
           + state + " current state: " + node.getAdminState());
       try {
-        Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+        Thread.sleep(HEARTBEAT_INTERVAL * 500);
       } catch (InterruptedException e) {
         // nothing
       }
@@ -322,28 +341,27 @@ public class TestDecommission {
   }
   
   private void verifyStats(NameNode namenode, FSNamesystem fsn,
-      DatanodeInfo node, boolean decommissioning)
+      DatanodeInfo info, DataNode node, boolean decommissioning)
       throws InterruptedException, IOException {
-    // Do the stats check over 10 iterations
+    // Do the stats check over 10 heartbeats
     for (int i = 0; i < 10; i++) {
       long[] newStats = namenode.getRpcServer().getStats();
 
       // For decommissioning nodes, ensure capacity of the DN is no longer
       // counted. Only used space of the DN is counted in cluster capacity
-      assertEquals(newStats[0], decommissioning ? node.getDfsUsed() : 
-        node.getCapacity());
+      assertEquals(newStats[0],
+          decommissioning ? info.getDfsUsed() : info.getCapacity());
 
       // Ensure cluster used capacity is counted for both normal and
       // decommissioning nodes
-      assertEquals(newStats[1], node.getDfsUsed());
+      assertEquals(newStats[1], info.getDfsUsed());
 
       // For decommissioning nodes, remaining space from the DN is not counted
-      assertEquals(newStats[2], decommissioning ? 0 : node.getRemaining());
+      assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
 
       // Ensure transceiver count is same as that DN
-      assertEquals(fsn.getTotalLoad(), node.getXceiverCount());
-      
-      Thread.sleep(HEARTBEAT_INTERVAL * 1000); // Sleep heart beat interval
+      assertEquals(fsn.getTotalLoad(), info.getXceiverCount());
+      DataNodeTestUtils.triggerHeartbeat(node);
     }
   }
 
@@ -408,14 +426,6 @@ public class TestDecommission {
   }
   
   /**
-   * Tests recommission for non federated cluster
-   */
-  @Test(timeout=360000)
-  public void testRecommission() throws IOException {
-    testRecommission(1, 6);
-  }
-
-  /**
   * Test decommission for federated cluster
    */
   @Test(timeout=360000)
@@ -501,12 +511,12 @@ public class TestDecommission {
     //    1. the last DN would have been chosen as excess replica, given its
     //    heartbeat is considered old.
     //    Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete
-    //    2. After recomissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
+    //    2. After recommissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
     //    and one excess replica ( 3 )
     // After the fix,
-    //    After recomissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
+    //    After recommissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
     Thread.sleep(slowHeartbeatDNwaitTime);
-    recomissionNode(1, decomNodeFromSBN);
+    recommissionNode(1, decomNodeFromSBN);
 
     // Step 3.b, ask ANN to recommission the first DN.
     // To verify the fix, the test makes sure the excess replica picked by ANN
@@ -525,7 +535,7 @@ public class TestDecommission {
     cluster.restartDataNode(nextToLastDNprop);
     cluster.waitActive();
     Thread.sleep(slowHeartbeatDNwaitTime);
-    recomissionNode(0, decommissionedNodeFromANN);
+    recommissionNode(0, decommissionedNodeFromANN);
 
     // Step 3.c, make sure the DN has deleted the block and report to NNs
     cluster.triggerHeartbeats();
@@ -607,69 +617,88 @@ public class TestDecommission {
     cluster.shutdown();
   }
 
+  /**
+   * Test that over-replicated blocks are deleted on recommission.
+   */
+  @Test(timeout=120000)
+  public void testRecommission() throws Exception {
+    final int numDatanodes = 6;
+    try {
+      LOG.info("Starting test testRecommission");
 
-  private void testRecommission(int numNamenodes, int numDatanodes) 
-    throws IOException {
-    LOG.info("Starting test testRecommission");
+      startCluster(1, numDatanodes, conf);
 
-    startCluster(numNamenodes, numDatanodes, conf);
-  
-    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = 
-      new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
-    for(int i = 0; i < numNamenodes; i++) {
-      namenodeDecomList.add(i, new ArrayList<DatanodeInfo>(numDatanodes));
-    }
-    Path file1 = new Path("testDecommission.dat");
-    int replicas = numDatanodes - 1;
-      
-    for (int i = 0; i < numNamenodes; i++) {
-      ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(i);
-      FileSystem fileSys = cluster.getFileSystem(i);
+      final Path file1 = new Path("testDecommission.dat");
+      final int replicas = numDatanodes - 1;
+
+      ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
+      final FileSystem fileSys = cluster.getFileSystem();
+
+      // Write a file to n-1 datanodes
       writeFile(fileSys, file1, replicas);
-        
-      // Decommission one node. Verify that node is decommissioned.
-      DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
-          AdminStates.DECOMMISSIONED);
+
+      // Decommission one of the datanodes with a replica
+      BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
+      assertEquals("Unexpected number of replicas from getFileBlockLocations",
+          replicas, loc.getHosts().length);
+      final String toDecomHost = loc.getNames()[0];
+      String toDecomUuid = null;
+      for (DataNode d : cluster.getDataNodes()) {
+        if (d.getDatanodeId().getXferAddr().equals(toDecomHost)) {
+          toDecomUuid = d.getDatanodeId().getDatanodeUuid();
+          break;
+        }
+      }
+      assertNotNull("Could not find a dn with the block!", toDecomUuid);
+      final DatanodeInfo decomNode =
+          decommissionNode(0, toDecomUuid, decommissionedNodes,
+              AdminStates.DECOMMISSIONED);
       decommissionedNodes.add(decomNode);
-        
+      final BlockManager blockManager =
+          cluster.getNamesystem().getBlockManager();
+      final DatanodeManager datanodeManager =
+          blockManager.getDatanodeManager();
+      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+
       // Ensure decommissioned datanode is not automatically shutdown
-      DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
-      assertEquals("All datanodes must be alive", numDatanodes, 
+      DFSClient client = getDfsClient(cluster.getNameNode(), conf);
+      assertEquals("All datanodes must be alive", numDatanodes,
           client.datanodeReport(DatanodeReportType.LIVE).length);
-      int tries =0;
+
       // wait for the block to be replicated
-      while (tries++ < 20) {
-        try {
-          Thread.sleep(1000);
-          if (checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
-              numDatanodes) == null) {
-            break;
-          }
-        } catch (InterruptedException ie) {
-        }
-      }
-      assertTrue("Checked if block was replicated after decommission, tried "
-          + tries + " times.", tries < 20);
-
-      // stop decommission and check if the new replicas are removed
-      recomissionNode(0, decomNode);
-      // wait for the block to be deleted
-      tries = 0;
-      while (tries++ < 20) {
-        try {
-          Thread.sleep(1000);
-          if (checkFile(fileSys, file1, replicas, null, numDatanodes) == null) {
-            break;
+      final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
+      final String uuid = toDecomUuid;
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          BlockInfoContiguous info =
+              blockManager.getStoredBlock(b.getLocalBlock());
+          int count = 0;
+          StringBuilder sb = new StringBuilder("Replica locations: ");
+          for (int i = 0; i < info.numNodes(); i++) {
+            DatanodeDescriptor dn = info.getDatanode(i);
+            sb.append(dn + ", ");
+            if (!dn.getDatanodeUuid().equals(uuid)) {
+              count++;
+            }
           }
-        } catch (InterruptedException ie) {
+          LOG.info(sb.toString());
+          LOG.info("Count: " + count);
+          return count == replicas;
         }
-      }
+      }, 500, 30000);
+
+      // recommission and wait for over-replication to be fixed
+      recommissionNode(0, decomNode);
+      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+      DFSTestUtil.waitForReplication(cluster, b, 1, replicas, 0);
+
       cleanupFile(fileSys, file1);
-      assertTrue("Checked if node was recommissioned " + tries + " times.",
-         tries < 20);
-      LOG.info("tried: " + tries + " times before recommissioned");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
-    cluster.shutdown();
   }
   
   /**
@@ -703,20 +732,35 @@ public class TestDecommission {
       
       FSNamesystem fsn = cluster.getNamesystem(i);
       NameNode namenode = cluster.getNameNode(i);
-      DatanodeInfo downnode = decommissionNode(i, null, null,
+      
+      DatanodeInfo decomInfo = decommissionNode(i, null, null,
           AdminStates.DECOMMISSION_INPROGRESS);
+      DataNode decomNode = getDataNode(decomInfo);
       // Check namenode stats for multiple datanode heartbeats
-      verifyStats(namenode, fsn, downnode, true);
+      verifyStats(namenode, fsn, decomInfo, decomNode, true);
       
       // Stop decommissioning and verify stats
       writeConfigFile(excludeFile, null);
       refreshNodes(fsn, conf);
-      DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
-      waitNodeState(ret, AdminStates.NORMAL);
-      verifyStats(namenode, fsn, ret, false);
+      DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
+      DataNode retNode = getDataNode(decomInfo);
+      waitNodeState(retInfo, AdminStates.NORMAL);
+      verifyStats(namenode, fsn, retInfo, retNode, false);
     }
   }
-  
+
+  private DataNode getDataNode(DatanodeInfo decomInfo) {
+    DataNode decomNode = null;
+    for (DataNode dn: cluster.getDataNodes()) {
+      if (decomInfo.equals(dn.getDatanodeId())) {
+        decomNode = dn;
+        break;
+      }
+    }
+    assertNotNull("Could not find decomNode in cluster!", decomNode);
+    return decomNode;
+  }
+
   /**
    * Test host/include file functionality. Only datanodes
    * in the include file are allowed to connect to the namenode in a non
@@ -902,9 +946,9 @@ public class TestDecommission {
    * It is not recommended to use a registration name which is not also a
    * valid DNS hostname for the DataNode.  See HDFS-5237 for background.
    */
+  @Ignore
   @Test(timeout=360000)
-  public void testIncludeByRegistrationName() throws IOException,
-      InterruptedException {
+  public void testIncludeByRegistrationName() throws Exception {
     Configuration hdfsConf = new Configuration(conf);
     // Any IPv4 address starting with 127 functions as a "loopback" address
     // which is connected to the current host.  So by choosing 127.0.0.100
@@ -927,15 +971,22 @@ public class TestDecommission {
     refreshNodes(cluster.getNamesystem(0), hdfsConf);
 
     // Wait for the DN to be marked dead.
-    DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
-    while (true) {
-      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
-      if (info.length == 1) {
-        break;
+    LOG.info("Waiting for DN to be marked as dead.");
+    final DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        BlockManagerTestUtil
+            .checkHeartbeat(cluster.getNamesystem().getBlockManager());
+        try {
+          DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
+          return info.length == 1;
+        } catch (IOException e) {
+          LOG.warn("Failed to check dead DNs", e);
+          return false;
+        }
       }
-      LOG.info("Waiting for datanode to be marked dead");
-      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
-    }
+    }, 500, 5000);
 
     // Use a non-empty include file with our registration name.
     // It should work.
@@ -945,18 +996,169 @@ public class TestDecommission {
     writeConfigFile(hostsFile,  nodes);
     refreshNodes(cluster.getNamesystem(0), hdfsConf);
     cluster.restartDataNode(0);
+    cluster.triggerHeartbeats();
 
     // Wait for the DN to come back.
-    while (true) {
-      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
-      if (info.length == 1) {
-        Assert.assertFalse(info[0].isDecommissioned());
-        Assert.assertFalse(info[0].isDecommissionInProgress());
-        assertEquals(registrationName, info[0].getHostName());
-        break;
+    LOG.info("Waiting for DN to come back.");
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        BlockManagerTestUtil
+            .checkHeartbeat(cluster.getNamesystem().getBlockManager());
+        try {
+          DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
+          if (info.length == 1) {
+            Assert.assertFalse(info[0].isDecommissioned());
+            Assert.assertFalse(info[0].isDecommissionInProgress());
+            assertEquals(registrationName, info[0].getHostName());
+            return true;
+          }
+        } catch (IOException e) {
+          LOG.warn("Failed to check dead DNs", e);
+        }
+        return false;
       }
-      LOG.info("Waiting for datanode to come back");
-      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }, 500, 5000);
+  }
+  
+  @Test(timeout=120000)
+  public void testBlocksPerInterval() throws Exception {
+    Configuration newConf = new Configuration(conf);
+    org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+        .setLevel(Level.TRACE);
+    // Turn the blocks per interval way down
+    newConf.setInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
+        3);
+    // Disable the normal monitor runs
+    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+        Integer.MAX_VALUE);
+    startCluster(1, 3, newConf);
+    final FileSystem fs = cluster.getFileSystem();
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    final DecommissionManager decomManager = datanodeManager.getDecomManager();
+
+    // Write a 3 block file, so each node has one block. Should scan 3 nodes.
+    DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 3);
+    // Write another file, should only scan two
+    DFSTestUtil.createFile(fs, new Path("/file2"), 64, (short)3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 2);
+    // One more file, should only scan 1
+    DFSTestUtil.createFile(fs, new Path("/file3"), 64, (short)3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 1);
+    // blocks on each DN now exceed the limit, still scan at least one node
+    DFSTestUtil.createFile(fs, new Path("/file4"), 64, (short)3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 1);
+  }
+
+  @Deprecated
+  @Test(timeout=120000)
+  public void testNodesPerInterval() throws Exception {
+    Configuration newConf = new Configuration(conf);
+    org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+        .setLevel(Level.TRACE);
+    // Set the deprecated configuration key which limits the # of nodes per 
+    // interval
+    newConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1);
+    // Disable the normal monitor runs
+    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+        Integer.MAX_VALUE);
+    startCluster(1, 3, newConf);
+    final FileSystem fs = cluster.getFileSystem();
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    final DecommissionManager decomManager = datanodeManager.getDecomManager();
+
+    // Write a 3 block file, so each node has one block. Should scan 1 node 
+    // each time.
+    DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
+    for (int i=0; i<3; i++) {
+      doDecomCheck(datanodeManager, decomManager, 1);
     }
   }
+
+  private void doDecomCheck(DatanodeManager datanodeManager,
+      DecommissionManager decomManager, int expectedNumCheckedNodes)
+      throws IOException, ExecutionException, InterruptedException {
+    // Decom all nodes
+    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
+    for (DataNode d: cluster.getDataNodes()) {
+      DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
+          decommissionedNodes,
+          AdminStates.DECOMMISSION_INPROGRESS);
+      decommissionedNodes.add(dn);
+    }
+    // Run decom scan and check
+    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+    assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes, 
+        decomManager.getNumNodesChecked());
+    // Recommission all nodes
+    for (DatanodeInfo dn : decommissionedNodes) {
+      recommissionNode(0, dn);
+    }
+  }
+
+  @Test(timeout=120000)
+  public void testPendingNodes() throws Exception {
+    Configuration newConf = new Configuration(conf);
+    org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+        .setLevel(Level.TRACE);
+    // Only allow one node to be decom'd at a time
+    newConf.setInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
+        1);
+    // Disable the normal monitor runs
+    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 
+        Integer.MAX_VALUE);
+    startCluster(1, 3, newConf);
+    final FileSystem fs = cluster.getFileSystem();
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    final DecommissionManager decomManager = datanodeManager.getDecomManager();
+
+    // Keep a file open to prevent decom from progressing
+    HdfsDataOutputStream open1 =
+        (HdfsDataOutputStream) fs.create(new Path("/openFile1"), (short)3);
+    // Flush and trigger block reports so the block definitely shows up on NN
+    open1.write(123);
+    open1.hflush();
+    for (DataNode d: cluster.getDataNodes()) {
+      DataNodeTestUtils.triggerBlockReport(d);
+    }
+    // Decom two nodes, so one is still alive
+    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
+    for (int i=0; i<2; i++) {
+      final DataNode d = cluster.getDataNodes().get(i);
+      DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(), 
+          decommissionedNodes, 
+          AdminStates.DECOMMISSION_INPROGRESS);
+      decommissionedNodes.add(dn);
+    }
+
+    for (int i=2; i>=0; i--) {
+      assertTrackedAndPending(decomManager, 0, i);
+      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+    }
+
+    // Close file, try to decom the last node, should get stuck in tracked
+    open1.close();
+    final DataNode d = cluster.getDataNodes().get(2);
+    DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
+        decommissionedNodes,
+        AdminStates.DECOMMISSION_INPROGRESS);
+    decommissionedNodes.add(dn);
+    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+    
+    assertTrackedAndPending(decomManager, 1, 0);
+  }
+
+  private void assertTrackedAndPending(DecommissionManager decomManager,
+      int tracked, int pending) {
+    assertEquals("Unexpected number of tracked nodes", tracked,
+        decomManager.getNumTrackedNodes());
+    assertEquals("Unexpected number of pending nodes", pending,
+        decomManager.getNumPendingNodes());
+  }
 }
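
One pattern worth noting in the rewritten tests above is the replacement of fixed Thread.sleep loops with GenericTestUtils.waitFor polling (check every N milliseconds, fail after a timeout). A minimal, self-contained illustration of that pattern follows; the counter-based condition here is purely hypothetical and stands in for the replica-count checks used in the decommission tests.

import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForExample {
  public static void main(String[] args) throws Exception {
    final AtomicInteger attempts = new AtomicInteger(0);
    // Poll every 100 ms and give up after 10 seconds, mirroring how the
    // decommission tests poll cluster state instead of sleeping blindly.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return attempts.incrementAndGet() >= 5;  // hypothetical condition
      }
    }, 100, 10000);
    System.out.println("Condition met after " + attempts.get() + " polls");
  }
}

Polling this way keeps the tests fast when the condition is reached early while still bounding the total wait, which is why the sleep-based retry loops were dropped.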

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index fccd308..f61176e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -300,9 +301,8 @@ public class BlockManagerTestUtil {
    * Have DatanodeManager check decommission state.
    * @param dm the DatanodeManager to manipulate
    */
-  public static void checkDecommissionState(DatanodeManager dm,
-      DatanodeDescriptor node) {
-    dm.checkDecommissionState(node);
+  public static void recheckDecommissionState(DatanodeManager dm)
+      throws ExecutionException, InterruptedException {
+    dm.getDecomManager().runMonitor();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index d9066e8..d514768 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -137,7 +137,7 @@ public class TestReplicationPolicyConsiderLoad {
       // returns false
       for (int i = 0; i < 3; i++) {
         DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
-        dnManager.startDecommission(d);
+        dnManager.getDecomManager().startDecommission(d);
         d.setDecommissioned();
       }
       assertEquals((double)load/3, dnManager.getFSClusterStats()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index a9aba86..789ee6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -29,7 +28,6 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
@@ -53,7 +51,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -90,7 +93,8 @@ public class TestDecommissioningStatus {
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     Path includeFile = new Path(dir, "include");
     conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
+        1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
         4);
@@ -104,6 +108,9 @@ public class TestDecommissioningStatus {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
+    cluster.getNamesystem().getBlockManager().getDatanodeManager()
+        .setHeartbeatExpireInterval(3000);
+    Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
   }
 
   @AfterClass
@@ -186,13 +193,16 @@ public class TestDecommissioningStatus {
   private void checkDecommissionStatus(DatanodeDescriptor decommNode,
       int expectedUnderRep, int expectedDecommissionOnly,
       int expectedUnderRepInOpenFiles) {
-    assertEquals(decommNode.decommissioningStatus.getUnderReplicatedBlocks(),
-        expectedUnderRep);
+    assertEquals("Unexpected num under-replicated blocks",
+        expectedUnderRep,
+        decommNode.decommissioningStatus.getUnderReplicatedBlocks());
+    assertEquals("Unexpected number of decom-only replicas",
+        expectedDecommissionOnly,
+        decommNode.decommissioningStatus.getDecommissionOnlyReplicas());
     assertEquals(
-        decommNode.decommissioningStatus.getDecommissionOnlyReplicas(),
-        expectedDecommissionOnly);
-    assertEquals(decommNode.decommissioningStatus
-        .getUnderReplicatedInOpenFiles(), expectedUnderRepInOpenFiles);
+        "Unexpected number of replicas in under-replicated open files",
+        expectedUnderRepInOpenFiles,
+        decommNode.decommissioningStatus.getUnderReplicatedInOpenFiles());
   }
 
   private void checkDFSAdminDecommissionStatus(
@@ -244,7 +254,7 @@ public class TestDecommissioningStatus {
    * Tests Decommissioning Status in DFS.
    */
   @Test
-  public void testDecommissionStatus() throws IOException, InterruptedException {
+  public void testDecommissionStatus() throws Exception {
     InetSocketAddress addr = new InetSocketAddress("localhost", cluster
         .getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
@@ -253,7 +263,7 @@ public class TestDecommissioningStatus {
     DistributedFileSystem fileSys = cluster.getFileSystem();
     DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
 
-    short replicas = 2;
+    short replicas = numDatanodes;
     //
     // Decommission one node. Verify the decommission status
     // 
@@ -263,7 +273,9 @@ public class TestDecommissioningStatus {
 
     Path file2 = new Path("decommission1.dat");
     FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
-    Thread.sleep(5000);
+    for (DataNode d: cluster.getDataNodes()) {
+      DataNodeTestUtils.triggerBlockReport(d);
+    }
 
     FSNamesystem fsn = cluster.getNamesystem();
     final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
@@ -271,19 +283,22 @@ public class TestDecommissioningStatus {
       String downnode = decommissionNode(fsn, client, localFileSys, iteration);
       dm.refreshNodes(conf);
       decommissionedNodes.add(downnode);
-      Thread.sleep(5000);
+      BlockManagerTestUtil.recheckDecommissionState(dm);
       final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
       if (iteration == 0) {
         assertEquals(decommissioningNodes.size(), 1);
         DatanodeDescriptor decommNode = decommissioningNodes.get(0);
-        checkDecommissionStatus(decommNode, 4, 0, 2);
+        checkDecommissionStatus(decommNode, 3, 0, 1);
         checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
             fileSys, admin);
       } else {
         assertEquals(decommissioningNodes.size(), 2);
         DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
         DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
-        checkDecommissionStatus(decommNode1, 4, 4, 2);
+        // This one is still 3,3,1 since it passed over the UC block 
+        // earlier, before node 2 was decommed
+        checkDecommissionStatus(decommNode1, 3, 3, 1);
+        // This one is 4,4,2 since it has the full state
         checkDecommissionStatus(decommNode2, 4, 4, 2);
         checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
             fileSys, admin);
@@ -305,8 +320,7 @@ public class TestDecommissioningStatus {
    * the replication process after it rejoins the cluster.
    */
   @Test(timeout=120000)
-  public void testDecommissionStatusAfterDNRestart()
-      throws IOException, InterruptedException {
+  public void testDecommissionStatusAfterDNRestart() throws Exception {
     DistributedFileSystem fileSys =
         (DistributedFileSystem)cluster.getFileSystem();
 
@@ -345,7 +359,7 @@ public class TestDecommissioningStatus {
     BlockManagerTestUtil.checkHeartbeat(fsn.getBlockManager());
 
     // Force DatanodeManager to check decommission state.
-    BlockManagerTestUtil.checkDecommissionState(dm, dead.get(0));
+    BlockManagerTestUtil.recheckDecommissionState(dm);
 
     // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
     assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
@@ -359,7 +373,7 @@ public class TestDecommissioningStatus {
     // Delete the under-replicated file, which should let the 
     // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
     cleanupFile(fileSys, f);
-    BlockManagerTestUtil.checkDecommissionState(dm, dead.get(0));
+    BlockManagerTestUtil.recheckDecommissionState(dm);
     assertTrue("the node should be decommissioned",
         dead.get(0).isDecommissioned());
 
@@ -380,8 +394,9 @@ public class TestDecommissioningStatus {
    * DECOMMISSIONED
    */
   @Test(timeout=120000)
-  public void testDecommissionDeadDN()
-      throws IOException, InterruptedException, TimeoutException {
+  public void testDecommissionDeadDN() throws Exception {
+    Logger log = Logger.getLogger(DecommissionManager.class);
+    log.setLevel(Level.DEBUG);
     DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
     String dnName = dnID.getXferAddr();
     DataNodeProperties stoppedDN = cluster.stopDataNode(0);
@@ -392,7 +407,7 @@ public class TestDecommissioningStatus {
     DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
     decommissionNode(fsn, localFileSys, dnName);
     dm.refreshNodes(conf);
-    BlockManagerTestUtil.checkDecommissionState(dm, dnDescriptor);
+    BlockManagerTestUtil.recheckDecommissionState(dm);
     assertTrue(dnDescriptor.isDecommissioned());
 
     // Add the node back

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 409fffc..70deb1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1305,7 +1305,7 @@ public class TestFsck {
           .getBlockManager().getBlockCollection(eb.getLocalBlock())
           .getBlocks()[0].getDatanode(0);
       cluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().startDecommission(dn);
+          .getDatanodeManager().getDecomManager().startDecommission(dn);
       String dnName = dn.getXferAddr();
 
       //wait for decommission start

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0d32b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index 426563b..35a611b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -30,8 +30,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
@@ -240,7 +238,7 @@ public class TestNamenodeCapacityReport {
         DatanodeDescriptor dnd =
             dnm.getDatanode(datanodes.get(i).getDatanodeId());
         expectedInServiceLoad -= dnd.getXceiverCount();
-        dnm.startDecommission(dnd);
+        dnm.getDecomManager().startDecommission(dnd);
         DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
         Thread.sleep(100);
         checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);


[17/49] hadoop git commit: Adding 2.8 section in CHANGES.txt

Posted by zj...@apache.org.
Adding 2.8 section in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3e69927
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3e69927
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3e69927

Branch: refs/heads/YARN-2928
Commit: b3e699271be292e06633e07962a02897d911ca89
Parents: dfd8da7
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Sun Mar 8 20:24:33 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Sun Mar 8 20:24:33 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     | 12 ++++++++++++
 hadoop-mapreduce-project/CHANGES.txt            | 12 ++++++++++++
 hadoop-yarn-project/CHANGES.txt                 | 12 ++++++++++++
 4 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3e69927/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0af0beb..6f2c8c3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -426,6 +426,18 @@ Trunk (Unreleased)
 
     HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3e69927/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3cd6372..e106b1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -313,6 +313,18 @@ Trunk (Unreleased)
     HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks 
     fails using IBM java (Ayappan via aw)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3e69927/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 049b17d..8f06ac8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -245,6 +245,18 @@ Trunk (Unreleased)
 
     MAPREDUCE-6078. native-task: fix gtest build on macosx (Binglin Chang)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3e69927/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f28e932..da8b02e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -45,6 +45,18 @@ Trunk - Unreleased
     YARN-2428. LCE default banned user list should have yarn (Varun
     Saxena via aw)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[45/49] hadoop git commit: HDFS-6833. DirectoryScanner should not register a deleting block with memory of DataNode. Contributed by Shinichi Yamashita

Posted by zj...@apache.org.
HDFS-6833.  DirectoryScanner should not register a deleting block with memory of DataNode.  Contributed by Shinichi Yamashita


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6dae6d12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6dae6d12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6dae6d12

Branch: refs/heads/YARN-2928
Commit: 6dae6d12ec5abb716e1501cd4e18b10ae7809b94
Parents: 06ce1d9
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Fri Mar 13 02:25:32 2015 +0800
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Fri Mar 13 02:25:32 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/datanode/DirectoryScanner.java  | 20 ++++---
 .../server/datanode/fsdataset/FsDatasetSpi.java |  5 ++
 .../impl/FsDatasetAsyncDiskService.java         | 31 +++++++++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 41 +++++++++++++-
 .../server/datanode/SimulatedFSDataset.java     |  5 ++
 .../extdataset/ExternalDatasetImpl.java         |  5 ++
 .../fsdataset/impl/TestFsDatasetImpl.java       | 59 ++++++++++++++++++++
 8 files changed, 158 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 07213dd..e52b849 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1139,6 +1139,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7830. DataNode does not release the volume lock when adding a volume
     fails. (Lei Xu via Colin P. Mccabe)
 
+    HDFS-6833.  DirectoryScanner should not register a deleting block with
+    memory of DataNode.  (Shinichi Yamashita via szetszwo)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 01f967f..61dfb14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -443,13 +443,14 @@ public class DirectoryScanner implements Runnable {
         int d = 0; // index for blockpoolReport
         int m = 0; // index for memReport
         while (m < memReport.length && d < blockpoolReport.length) {
-          FinalizedReplica memBlock = memReport[Math.min(m, memReport.length - 1)];
-          ScanInfo info = blockpoolReport[Math.min(
-              d, blockpoolReport.length - 1)];
+          FinalizedReplica memBlock = memReport[m];
+          ScanInfo info = blockpoolReport[d];
           if (info.getBlockId() < memBlock.getBlockId()) {
-            // Block is missing in memory
-            statsRecord.missingMemoryBlocks++;
-            addDifference(diffRecord, statsRecord, info);
+            if (!dataset.isDeletingBlock(bpid, info.getBlockId())) {
+              // Block is missing in memory
+              statsRecord.missingMemoryBlocks++;
+              addDifference(diffRecord, statsRecord, info);
+            }
             d++;
             continue;
           }
@@ -495,8 +496,11 @@ public class DirectoryScanner implements Runnable {
                         current.getBlockId(), current.getVolume());
         }
         while (d < blockpoolReport.length) {
-          statsRecord.missingMemoryBlocks++;
-          addDifference(diffRecord, statsRecord, blockpoolReport[d++]);
+          if (!dataset.isDeletingBlock(bpid, blockpoolReport[d].getBlockId())) {
+            statsRecord.missingMemoryBlocks++;
+            addDifference(diffRecord, statsRecord, blockpoolReport[d]);
+          }
+          d++;
         }
         LOG.info(statsRecord.toString());
       } //end for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 10c8369..5b183e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -543,4 +543,9 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * Check whether the block was pinned
    */
   public boolean getPinning(ExtendedBlock block) throws IOException;
+  
+  /**
+   * Confirm whether the block is deleting
+   */
+  public boolean isDeletingBlock(String bpid, long blockId);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index 13e854f..c1d3990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -22,7 +22,10 @@ import java.io.File;
 import java.io.FileDescriptor;
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -64,9 +67,14 @@ class FsDatasetAsyncDiskService {
   private static final long THREADS_KEEP_ALIVE_SECONDS = 60; 
   
   private final DataNode datanode;
+  private final FsDatasetImpl fsdatasetImpl;
   private final ThreadGroup threadGroup;
   private Map<File, ThreadPoolExecutor> executors
       = new HashMap<File, ThreadPoolExecutor>();
+  private Map<String, Set<Long>> deletedBlockIds 
+      = new HashMap<String, Set<Long>>();
+  private static final int MAX_DELETED_BLOCKS = 64;
+  private int numDeletedBlocks = 0;
   
   /**
    * Create a AsyncDiskServices with a set of volumes (specified by their
@@ -75,8 +83,9 @@ class FsDatasetAsyncDiskService {
    * The AsyncDiskServices uses one ThreadPool per volume to do the async
    * disk operations.
    */
-  FsDatasetAsyncDiskService(DataNode datanode) {
+  FsDatasetAsyncDiskService(DataNode datanode, FsDatasetImpl fsdatasetImpl) {
     this.datanode = datanode;
+    this.fsdatasetImpl = fsdatasetImpl;
     this.threadGroup = new ThreadGroup(getClass().getSimpleName());
   }
 
@@ -286,7 +295,27 @@ class FsDatasetAsyncDiskService {
         LOG.info("Deleted " + block.getBlockPoolId() + " "
             + block.getLocalBlock() + " file " + blockFile);
       }
+      updateDeletedBlockId(block);
       IOUtils.cleanup(null, volumeRef);
     }
   }
+  
+  private synchronized void updateDeletedBlockId(ExtendedBlock block) {
+    Set<Long> blockIds = deletedBlockIds.get(block.getBlockPoolId());
+    if (blockIds == null) {
+      blockIds = new HashSet<Long>();
+      deletedBlockIds.put(block.getBlockPoolId(), blockIds);
+    }
+    blockIds.add(block.getBlockId());
+    numDeletedBlocks++;
+    if (numDeletedBlocks == MAX_DELETED_BLOCKS) {
+      for (Entry<String, Set<Long>> e : deletedBlockIds.entrySet()) {
+        String bpid = e.getKey();
+        Set<Long> bs = e.getValue();
+        fsdatasetImpl.removeDeletedBlocks(bpid, bs);
+        bs.clear();
+      }
+      numDeletedBlocks = 0;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 0f28aa4..48ac6ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -237,6 +237,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   private volatile boolean fsRunning;
 
   final ReplicaMap volumeMap;
+  final Map<String, Set<Long>> deletingBlock;
   final RamDiskReplicaTracker ramDiskReplicaTracker;
   final RamDiskAsyncLazyPersistService asyncLazyPersistService;
 
@@ -298,8 +299,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
             VolumeChoosingPolicy.class), conf);
     volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
         blockChooserImpl);
-    asyncDiskService = new FsDatasetAsyncDiskService(datanode);
+    asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
     asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
+    deletingBlock = new HashMap<String, Set<Long>>();
 
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
       addVolume(dataLocations, storage.getStorageDir(idx));
@@ -1795,7 +1797,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
               +  ". Parent not found for file " + f);
           continue;
         }
-        volumeMap.remove(bpid, invalidBlks[i]);
+        ReplicaInfo removing = volumeMap.remove(bpid, invalidBlks[i]);
+        addDeletingBlock(bpid, removing.getBlockId());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Block file " + removing.getBlockFile().getName()
+              + " is to be deleted");
+        }
       }
 
       if (v.isTransientStorage()) {
@@ -3005,5 +3012,35 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     FileStatus fss = localFS.getFileStatus(new Path(f.getAbsolutePath()));
     return fss.getPermission().getStickyBit();
   }
+  
+  @Override
+  public boolean isDeletingBlock(String bpid, long blockId) {
+    synchronized(deletingBlock) {
+      Set<Long> s = deletingBlock.get(bpid);
+      return s != null ? s.contains(blockId) : false;
+    }
+  }
+  
+  public void removeDeletedBlocks(String bpid, Set<Long> blockIds) {
+    synchronized (deletingBlock) {
+      Set<Long> s = deletingBlock.get(bpid);
+      if (s != null) {
+        for (Long id : blockIds) {
+          s.remove(id);
+        }
+      }
+    }
+  }
+  
+  private void addDeletingBlock(String bpid, Long blockId) {
+    synchronized(deletingBlock) {
+      Set<Long> s = deletingBlock.get(bpid);
+      if (s == null) {
+        s = new HashSet<Long>();
+        deletingBlock.put(bpid, s);
+      }
+      s.add(blockId);
+    }
+  }
 }
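
Taken together, the hunks above work as follows: FsDatasetImpl records the IDs of
replicas whose files have been handed to the async disk service for deletion,
FsDatasetAsyncDiskService reports those IDs back in batches of MAX_DELETED_BLOCKS
once the files are actually gone, and DirectoryScanner consults the set before
flagging an on-disk block as "missing in memory". A self-contained sketch of that
bookkeeping pattern, using purely illustrative names:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Minimal sketch of the pending-deletion bookkeeping; the real code lives in
    // FsDatasetImpl and FsDatasetAsyncDiskService.
    class PendingDeletionTracker {
      private final Map<String, Set<Long>> deleting =
          new HashMap<String, Set<Long>>();

      // Called when a replica is invalidated and its file is queued for deletion.
      synchronized void add(String bpid, long blockId) {
        Set<Long> s = deleting.get(bpid);
        if (s == null) {
          s = new HashSet<Long>();
          deleting.put(bpid, s);
        }
        s.add(blockId);
      }

      // Consulted by the scanner before reporting a block as missing in memory.
      synchronized boolean isDeleting(String bpid, long blockId) {
        Set<Long> s = deleting.get(bpid);
        return s != null && s.contains(blockId);
      }

      // Called, in batches, once the block files have been removed from disk.
      synchronized void remove(String bpid, Set<Long> blockIds) {
        Set<Long> s = deleting.get(bpid);
        if (s != null) {
          s.removeAll(blockIds);
        }
      }
    }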
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 8ae5415..f0dbd0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1318,5 +1318,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public boolean getPinning(ExtendedBlock b) throws IOException {
     return blockMap.get(b.getBlockPoolId()).get(b.getLocalBlock()).pinned;
   }
+  
+  @Override
+  public boolean isDeletingBlock(String bpid, long blockId) {
+    throw new UnsupportedOperationException();
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index a3c9935..6653cca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -429,4 +429,9 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   public boolean getPinning(ExtendedBlock block) throws IOException {
     return false;
   }
+  
+  @Override
+  public boolean isDeletingBlock(String bpid, long blockId) {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dae6d12/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 3b47dd0..403cb2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -18,10 +18,14 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.collect.Lists;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -29,8 +33,11 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DNConf;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
@@ -88,6 +95,8 @@ public class TestFsDatasetImpl {
   private DataNode datanode;
   private DataStorage storage;
   private FsDatasetImpl dataset;
+  
+  private final static String BLOCKPOOL = "BP-TEST";
 
   private static Storage.StorageDirectory createStorageDirectory(File root) {
     Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
@@ -334,4 +343,54 @@ public class TestFsDatasetImpl {
 
     FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
   }
+  
+  @Test
+  public void testDeletingBlocks() throws IOException {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
+    try {
+      cluster.waitActive();
+      DataNode dn = cluster.getDataNodes().get(0);
+      
+      FsDatasetImpl ds = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
+      FsVolumeImpl vol = ds.getVolumes().get(0);
+
+      ExtendedBlock eb;
+      ReplicaInfo info;
+      List<Block> blockList = new ArrayList<Block>();
+      for (int i = 1; i <= 63; i++) {
+        eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
+        info = new FinalizedReplica(
+            eb.getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
+        ds.volumeMap.add(BLOCKPOOL, info);
+        info.getBlockFile().createNewFile();
+        info.getMetaFile().createNewFile();
+        blockList.add(info);
+      }
+      ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        // Nothing to do
+      }
+      assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
+
+      blockList.clear();
+      eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
+      info = new FinalizedReplica(
+          eb.getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
+      ds.volumeMap.add(BLOCKPOOL, info);
+      info.getBlockFile().createNewFile();
+      info.getMetaFile().createNewFile();
+      blockList.add(info);
+      ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        // Nothing to do
+      }
+      assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }


[12/49] hadoop git commit: HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)

Posted by zj...@apache.org.
HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64443490
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64443490
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64443490

Branch: refs/heads/YARN-2928
Commit: 64443490d7f189e8e42d284615f3814ef751395a
Parents: 608ebd5
Author: Steve Loughran <st...@apache.org>
Authored: Sun Mar 8 11:20:42 2015 -0700
Committer: Steve Loughran <st...@apache.org>
Committed: Sun Mar 8 11:22:16 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/fs/s3a/Constants.java     |  6 +++++-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 20 ++++++++++++++------
 .../src/site/markdown/tools/hadoop-aws/index.md | 10 +++++-----
 4 files changed, 26 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64443490/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 14cd75a..16002d5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1050,6 +1050,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
     should be non static. (Sean Busbey via yliu)
 
+    HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64443490/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index e7462dc..3486dfb 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.fs.s3a;
 
-
 public class Constants {
+  // s3 access key
+  public static final String ACCESS_KEY = "fs.s3a.access.key";
+
+  // s3 secret key
+  public static final String SECRET_KEY = "fs.s3a.secret.key";
 
   // number of simultaneous connections to s3
   public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64443490/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 1a30d6f..91a606c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -32,8 +32,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.fs.s3.S3Credentials;
-
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.ClientConfiguration;
@@ -159,12 +157,22 @@ public class S3AFileSystem extends FileSystem {
         this.getWorkingDirectory());
 
     // Try to get our credentials or just connect anonymously
-    S3Credentials s3Credentials = new S3Credentials();
-    s3Credentials.initialize(name, conf);
+    String accessKey = conf.get(ACCESS_KEY, null);
+    String secretKey = conf.get(SECRET_KEY, null);
+
+    String userInfo = name.getUserInfo();
+    if (userInfo != null) {
+      int index = userInfo.indexOf(':');
+      if (index != -1) {
+        accessKey = userInfo.substring(0, index);
+        secretKey = userInfo.substring(index + 1);
+      } else {
+        accessKey = userInfo;
+      }
+    }
 
     AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
-        new BasicAWSCredentialsProvider(s3Credentials.getAccessKey(),
-                                        s3Credentials.getSecretAccessKey()),
+        new BasicAWSCredentialsProvider(accessKey, secretKey),
         new InstanceProfileCredentialsProvider(),
         new AnonymousAWSCredentialsProvider()
     );
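
In effect, S3A now resolves credentials from the new fs.s3a.access.key and
fs.s3a.secret.key properties, with any user-info embedded in the filesystem URI
taking precedence over the configuration. A standalone sketch of that lookup
order, mirroring the hunk above (the class name and key values are made up for
illustration):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;

    public class S3ACredentialLookupExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.s3a.access.key", "ACCESS_KEY_FROM_CONF");
        conf.set("fs.s3a.secret.key", "SECRET_KEY_FROM_CONF");

        // Credentials embedded in the URI user-info, if present, win over conf.
        URI name = new URI("s3a://URI_ACCESS_KEY:URI_SECRET_KEY@example-bucket/");

        String accessKey = conf.get("fs.s3a.access.key", null);
        String secretKey = conf.get("fs.s3a.secret.key", null);
        String userInfo = name.getUserInfo();
        if (userInfo != null) {
          int index = userInfo.indexOf(':');
          if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
          } else {
            accessKey = userInfo;
          }
        }
        System.out.println("access key in effect: " + accessKey);
        System.out.println("secret key in effect: " + secretKey);
      }
    }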

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64443490/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index bf62634..e0389c0 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -141,12 +141,12 @@ If you do any of these: change your credentials immediately!
 ### Authentication properties
 
     <property>
-      <name>fs.s3a.awsAccessKeyId</name>
+      <name>fs.s3a.access.key</name>
       <description>AWS access key ID. Omit for Role-based authentication.</description>
     </property>
 
     <property>
-      <name>fs.s3a.awsSecretAccessKey</name>
+      <name>fs.s3a.secret.key</name>
       <description>AWS secret key. Omit for Role-based authentication.</description>
     </property>
 
@@ -411,13 +411,13 @@ Example:
       </property>
 
       <property>
-        <name>fs.s3a.awsAccessKeyId</name>
+        <name>fs.s3a.access.key</name>
         <description>AWS access key ID. Omit for Role-based authentication.</description>
-        <value>DONOTPCOMMITTHISKEYTOSCM</value>
+        <value>DONOTCOMMITTHISKEYTOSCM</value>
       </property>
   
       <property>
-        <name>fs.s3a.awsSecretAccessKey</name>
+        <name>fs.s3a.secret.key</name>
         <description>AWS secret key. Omit for Role-based authentication.</description>
         <value>DONOTEVERSHARETHISSECRETKEY!</value>
       </property>


[27/49] hadoop git commit: HADOOP-11226. Add a configuration to set ipc.Client's traffic class with IPTOS_LOWDELAY|IPTOS_RELIABILITY. Contributed by Gopal V.

Posted by zj...@apache.org.
HADOOP-11226. Add a configuration to set ipc.Client's traffic class with IPTOS_LOWDELAY|IPTOS_RELIABILITY. Contributed by Gopal V.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54639c7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54639c7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54639c7d

Branch: refs/heads/YARN-2928
Commit: 54639c7d7a34f4a46e8df50d57c79bab34b1ac07
Parents: c3003eb
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Tue Mar 10 13:08:29 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Tue Mar 10 13:08:29 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../fs/CommonConfigurationKeysPublic.java       |  6 +++-
 .../main/java/org/apache/hadoop/ipc/Client.java | 33 ++++++++++++++++++--
 .../src/main/resources/core-default.xml         | 14 +++++++++
 4 files changed, 52 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54639c7d/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0fe5b7c..fa73ba1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -432,6 +432,9 @@ Release 2.8.0 - UNRELEASED
 
   NEW FEATURES
 
+    HADOOP-11226. Add a configuration to set ipc.Client's traffic class with
+    IPTOS_LOWDELAY|IPTOS_RELIABILITY. (Gopal V via ozawa)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54639c7d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 00c8d78..470b4d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -206,8 +206,12 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
     "ipc.client.tcpnodelay";
-  /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */
+  /** Default value for IPC_CLIENT_TCPNODELAY_KEY */
   public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
+  /** Enable low-latency connections from the client */
+  public static final String   IPC_CLIENT_LOW_LATENCY = "ipc.client.low-latency";
+  /** Default value of IPC_CLIENT_LOW_LATENCY */
+  public static final boolean  IPC_CLIENT_LOW_LATENCY_DEFAULT = false;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
     "ipc.server.listen.queue.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54639c7d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 32558bc..97b715b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -384,7 +384,8 @@ public class Client {
     private final RetryPolicy connectionRetryPolicy;
     private final int maxRetriesOnSasl;
     private int maxRetriesOnSocketTimeouts;
-    private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+    private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+    private final boolean tcpLowLatency; // if T then use low-delay QoS
     private boolean doPing; //do we need to send ping message
     private int pingInterval; // how often sends ping to the server in msecs
     private ByteArrayOutputStream pingRequest; // ping message
@@ -413,6 +414,7 @@ public class Client {
       this.maxRetriesOnSasl = remoteId.getMaxRetriesOnSasl();
       this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
       this.tcpNoDelay = remoteId.getTcpNoDelay();
+      this.tcpLowLatency = remoteId.getTcpLowLatency();
       this.doPing = remoteId.getDoPing();
       if (doPing) {
         // construct a RPC header with the callId as the ping callId
@@ -585,6 +587,20 @@ public class Client {
           this.socket.setTcpNoDelay(tcpNoDelay);
           this.socket.setKeepAlive(true);
           
+          if (tcpLowLatency) {
+            /*
+             * This allows intermediate switches to shape IPC traffic
+             * differently from Shuffle/HDFS DataStreamer traffic.
+             *
+             * IPTOS_RELIABILITY (0x04) | IPTOS_LOWDELAY (0x10)
+             *
+             * Prefer to optimize connect() speed & response latency over net
+             * throughput.
+             */
+            this.socket.setTrafficClass(0x04 | 0x10);
+            this.socket.setPerformancePreferences(1, 2, 0);
+          }
+
           /*
            * Bind the socket to the host specified in the principal name of the
            * client, to ensure Server matching address of the client connection
@@ -1549,6 +1565,7 @@ public class Client {
     // the max. no. of retries for socket connections on time out exceptions
     private final int maxRetriesOnSocketTimeouts;
     private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+    private final boolean tcpLowLatency; // if T then use low-delay QoS
     private final boolean doPing; //do we need to send ping message
     private final int pingInterval; // how often sends ping to the server in msecs
     private String saslQop; // here for testing
@@ -1575,6 +1592,10 @@ public class Client {
       this.tcpNoDelay = conf.getBoolean(
           CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
           CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT);
+      this.tcpLowLatency = conf.getBoolean(
+          CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY,
+          CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY_DEFAULT
+          );
       this.doPing = conf.getBoolean(
           CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
           CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT);
@@ -1610,11 +1631,17 @@ public class Client {
     public int getMaxRetriesOnSocketTimeouts() {
       return maxRetriesOnSocketTimeouts;
     }
-    
+
+    /** disable nagle's algorithm */
     boolean getTcpNoDelay() {
       return tcpNoDelay;
     }
-    
+
+    /** use low-latency QoS bits over TCP */
+    boolean getTcpLowLatency() {
+      return tcpLowLatency;
+    }
+
     boolean getDoPing() {
       return doPing;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54639c7d/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 74390d8..1d531df 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -978,6 +978,20 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>ipc.client.tcpnodelay</name>
+  <value>true</value>
+  <description>Use TCP_NODELAY flag to bypass Nagle's algorithm transmission delays.
+  </description>
+</property>
+
+<property>
+  <name>ipc.client.low-latency</name>
+  <value>false</value>
+  <description>Use low-latency QoS markers for IPC connections.
+  </description>
+</property>
+
+<property>
   <name>ipc.server.listen.queue.size</name>
   <value>128</value>
   <description>Indicates the length of the listen queue for servers accepting
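
To use the new knob, a client can either set ipc.client.low-latency in
core-site.xml as documented above or flip it programmatically. A minimal
illustrative snippet (only the property name and the constant come from this
commit; the class name is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class LowLatencyIpcExample {
      public static void main(String[] args) {
        // Opt this client's IPC connections into the low-latency traffic class.
        Configuration conf = new Configuration();
        conf.setBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY, true);
        // Connections built from this conf mark their sockets with
        // IPTOS_RELIABILITY (0x04) | IPTOS_LOWDELAY (0x10), i.e. traffic class 0x14.
        System.out.println("ipc.client.low-latency = " + conf.getBoolean(
            CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY, false));
      }
    }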


[16/49] hadoop git commit: Moving CHANGES.txt entry for MAPREDUCE-5657 to branch-2.

Posted by zj...@apache.org.
Moving CHANGES.txt entry for MAPREDUCE-5657 to branch-2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfd8da77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfd8da77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfd8da77

Branch: refs/heads/YARN-2928
Commit: dfd8da777fee7f8582aab584df35d896cc9e2f86
Parents: 790aa67
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Mon Mar 9 12:04:44 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Mon Mar 9 12:05:08 2015 +0900

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfd8da77/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d0d8216..049b17d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -181,9 +181,6 @@ Trunk (Unreleased)
     MAPREDUCE-6234. TestHighRamJob fails due to the change in MAPREDUCE-5785. 
     (Masatake Iwasaki via kasha)
 
-    MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc
-    comments. (Akira AJISAKA via ozawa)
-
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
     MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by
@@ -417,6 +414,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. (Brahma 
     Reddy Battula via ozawa)
 
+    MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc
+    comments. (Akira AJISAKA and Andrew Purtell via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES