Posted to common-commits@hadoop.apache.org by jh...@apache.org on 2017/02/07 04:25:45 UTC

[01/23] hadoop git commit: YARN-6103. Log updates for ZKRMStateStore (Contributed by Daniel Sturman via Daniel Templeton)

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 11e44bdda -> d88497d44


YARN-6103. Log updates for ZKRMStateStore (Contributed by Daniel Sturman via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87852b6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87852b6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87852b6e

Branch: refs/heads/YARN-5734
Commit: 87852b6ef4b9d973b7b3999974d41c8860fb1495
Parents: 11e44bd
Author: Daniel Templeton <te...@apache.org>
Authored: Mon Jan 30 21:44:48 2017 -0800
Committer: Daniel Templeton <te...@apache.org>
Committed: Mon Jan 30 21:44:48 2017 -0800

----------------------------------------------------------------------
 .../recovery/ZKRMStateStore.java                | 27 ++++++++++++--------
 1 file changed, 16 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87852b6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index cf6380f..1212a91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -610,8 +610,10 @@ public class ZKRMStateStore extends RMStateStore {
     } else {
       safeCreate(nodeUpdatePath, appStateData, zkAcl,
           CreateMode.PERSISTENT);
-      LOG.debug(appId + " znode didn't exist. Created a new znode to"
-          + " update the application state.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(appId + " znode didn't exist. Created a new znode to"
+            + " update the application state.");
+      }
     }
   }
 
@@ -655,8 +657,10 @@ public class ZKRMStateStore extends RMStateStore {
     } else {
       safeCreate(nodeUpdatePath, attemptStateData, zkAcl,
           CreateMode.PERSISTENT);
-      LOG.debug(appAttemptId + " znode didn't exist. Created a new znode to"
-          + " update the application attempt state.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(appAttemptId + " znode didn't exist. Created a new znode to"
+            + " update the application attempt state.");
+      }
     }
   }
 
@@ -736,7 +740,9 @@ public class ZKRMStateStore extends RMStateStore {
     } else {
       // in case znode doesn't exist
       addStoreOrUpdateOps(trx, rmDTIdentifier, renewDate, false);
-      LOG.debug("Attempted to update a non-existing znode " + nodeRemovePath);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Attempted to update a non-existing znode " + nodeRemovePath);
+      }
     }
 
     trx.commit();
@@ -753,12 +759,12 @@ public class ZKRMStateStore extends RMStateStore {
     ByteArrayOutputStream seqOs = new ByteArrayOutputStream();
 
     try (DataOutputStream seqOut = new DataOutputStream(seqOs)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug((isUpdate ? "Storing " : "Updating ") + "RMDelegationToken_"
-            + rmDTIdentifier.getSequenceNumber());
-      }
 
       if (isUpdate) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Updating RMDelegationToken_"
+              + rmDTIdentifier.getSequenceNumber());
+        }
         trx.setData(nodeCreatePath, identifierData.toByteArray(), -1);
       } else {
         trx.create(nodeCreatePath, identifierData.toByteArray(), zkAcl,
@@ -767,8 +773,7 @@ public class ZKRMStateStore extends RMStateStore {
         seqOut.writeInt(rmDTIdentifier.getSequenceNumber());
 
         if (LOG.isDebugEnabled()) {
-          LOG.debug((isUpdate ? "Storing " : "Updating ")
-              + dtSequenceNumberPath + ". SequenceNumber: "
+          LOG.debug("Storing " + dtSequenceNumberPath + ". SequenceNumber: "
               + rmDTIdentifier.getSequenceNumber());
         }
 


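The change wraps each of these debug statements in a LOG.isDebugEnabled() guard, so the message string is only concatenated when debug logging is actually on, and it moves the "Storing"/"Updating" delegation-token messages into the branches that really perform those actions. A minimal, self-contained sketch of the guard pattern, using an illustrative class and method rather than anything from the patch (commons-logging, as used elsewhere in these commits):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedDebugLogging {
      private static final Log LOG = LogFactory.getLog(GuardedDebugLogging.class);

      void recordCreatedZnode(String appId) {
        // The guard skips the string concatenation below entirely
        // when debug logging is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug(appId + " znode didn't exist. Created a new znode to"
              + " update the application state.");
        }
      }
    }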


[11/23] hadoop git commit: HADOOP-14045. Aliyun OSS documentation missing from website. Contributed by Yiqun Lin.

HADOOP-14045. Aliyun OSS documentation missing from website. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a942eed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a942eed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a942eed

Branch: refs/heads/YARN-5734
Commit: 2a942eed214015379ba26854e54372d9a919e6fb
Parents: 6aa09dc
Author: Yiqun Lin <yq...@apache.org>
Authored: Thu Feb 2 10:07:24 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Thu Feb 2 10:07:24 2017 +0800

----------------------------------------------------------------------
 hadoop-project/src/site/site.xml | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a942eed/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 953f8f0..ee52a96 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -148,6 +148,7 @@
     </menu>
     
     <menu name="Hadoop Compatible File Systems" inherit="top">
+      <item name="Aliyun OSS" href="hadoop-aliyun/tools/hadoop-aliyun/index.html"/>
       <item name="Amazon S3" href="hadoop-aws/tools/hadoop-aws/index.html"/>
       <item name="Azure Blob Storage" href="hadoop-azure/index.html"/>
       <item name="Azure Data Lake Storage"




[16/23] hadoop git commit: HADOOP-14044. Synchronization issue in delegation token cancel functionality. Contributed by Hrishikesh Gadre.

HADOOP-14044. Synchronization issue in delegation token cancel functionality. Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba75bc75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba75bc75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba75bc75

Branch: refs/heads/YARN-5734
Commit: ba75bc759334c8987e5f7dd4b21d025f0cccbde7
Parents: e023584
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Feb 3 17:13:53 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Feb 3 17:13:53 2017 -0800

----------------------------------------------------------------------
 .../ZKDelegationTokenSecretManager.java         | 33 ++++++++++++++------
 1 file changed, 23 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba75bc75/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index 6c66e98..4a7ddb2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -670,6 +670,26 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
     return tokenInfo;
   }
 
+  /**
+   * This method synchronizes the state of a delegation token information in
+   * local cache with its actual value in Zookeeper.
+   *
+   * @param ident Identifier of the token
+   */
+  private synchronized void syncLocalCacheWithZk(TokenIdent ident) {
+    try {
+      DelegationTokenInformation tokenInfo = getTokenInfoFromZK(ident);
+      if (tokenInfo != null && !currentTokens.containsKey(ident)) {
+        currentTokens.put(ident, tokenInfo);
+      } else if (tokenInfo == null && currentTokens.containsKey(ident)) {
+        currentTokens.remove(ident);
+      }
+    } catch (IOException e) {
+      LOG.error("Error retrieving tokenInfo [" + ident.getSequenceNumber()
+          + "] from ZK", e);
+    }
+  }
+
   private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident)
       throws IOException {
     return getTokenInfoFromZK(ident, false);
@@ -851,16 +871,9 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
     DataInputStream in = new DataInputStream(buf);
     TokenIdent id = createIdentifier();
     id.readFields(in);
-    try {
-      if (!currentTokens.containsKey(id)) {
-        // See if token can be retrieved and placed in currentTokens
-        getTokenInfo(id);
-      }
-      return super.cancelToken(token, canceller);
-    } catch (Exception e) {
-      LOG.error("Exception while checking if token exist !!", e);
-      return id;
-    }
+
+    syncLocalCacheWithZk(id);
+    return super.cancelToken(token, canceller);
   }
 
   private void addOrUpdateToken(TokenIdent ident,

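The removed block in cancelToken caught every exception, including failures from super.cancelToken itself, and returned the identifier regardless, which hid real errors. The replacement reconciles the local token cache with ZooKeeper under a single lock before delegating the cancel. A compact sketch of that reconciliation idea in isolation, with simplified stand-in types rather than the real ZKDelegationTokenSecretManager API:

    import java.io.IOException;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class TokenCacheSync {
      static class TokenInfo { long renewDate; }

      private final Map<String, TokenInfo> currentTokens = new ConcurrentHashMap<>();

      // Stand-in for getTokenInfoFromZK: returns the znode contents, or null if absent.
      private TokenInfo readFromZk(String ident) throws IOException {
        return null;
      }

      // Bring the local cache into agreement with ZooKeeper before acting on a token.
      // synchronized so two concurrent cancels cannot interleave the two updates.
      synchronized void syncLocalCacheWithZk(String ident) {
        try {
          TokenInfo info = readFromZk(ident);
          if (info != null && !currentTokens.containsKey(ident)) {
            currentTokens.put(ident, info);   // present in ZooKeeper, missing locally
          } else if (info == null && currentTokens.containsKey(ident)) {
            currentTokens.remove(ident);      // already removed from ZooKeeper
          }
        } catch (IOException e) {
          // Log and fall through; the caller's cancel path handles a stale cache.
        }
      }
    }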



[21/23] hadoop git commit: HDFS-11377. Balancer hung due to no available mover threads. Contributed by yunjiong zhao.

HDFS-11377. Balancer hung due to no available mover threads. Contributed by yunjiong zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cbbd1ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cbbd1ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cbbd1ea

Branch: refs/heads/YARN-5734
Commit: 9cbbd1eae893b21212c9bc9e6745c6859317a667
Parents: cce35c3
Author: Yiqun Lin <yq...@apache.org>
Authored: Mon Feb 6 13:15:16 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Mon Feb 6 13:15:16 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cbbd1ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 6b153da..ceccff5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -88,7 +88,6 @@ import com.google.common.base.Preconditions;
 public class Dispatcher {
   static final Log LOG = LogFactory.getLog(Dispatcher.class);
 
-  private static final int MAX_NO_PENDING_MOVE_ITERATIONS = 5;
   /**
    * the period of time to delay the usage of a DataNode after hitting
    * errors when using it for migrating data
@@ -1108,6 +1107,8 @@ public class Dispatcher {
     }
     if (moveExecutor == null) {
       LOG.warn("No mover threads available: skip moving " + p);
+      targetDn.removePendingBlock(p);
+      p.proxySource.removePendingBlock(p);
       return;
     }
     moveExecutor.execute(new Runnable() {

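The move being skipped has already been recorded as pending against both the target DataNode and the proxy source (hence the two removePendingBlock calls added here); without removing it, the dispatcher would keep waiting for a pending move that never runs, which matches the hang described in HDFS-11377. A small model of that "reserve, then release on skip" rule, using made-up names that are not Hadoop APIs:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;

    class PendingQueue {
      private final List<Object> pending = new ArrayList<>();

      synchronized void reserve(Object move) { pending.add(move); }

      synchronized void release(Object move) {
        pending.remove(move);
        notifyAll();
      }

      // Blocks until every reserved move has been released again.
      synchronized void awaitEmpty() throws InterruptedException {
        while (!pending.isEmpty()) {
          wait();
        }
      }
    }

    class MoveScheduler {
      void schedule(PendingQueue source, PendingQueue target, Object move,
          ExecutorService pool) {
        source.reserve(move);
        target.reserve(move);
        if (pool == null) {         // no mover thread available: skip the move,
          source.release(move);     // but release both reservations, or any thread
          target.release(move);     // in awaitEmpty() waits forever.
          return;
        }
        pool.execute(() -> {
          try {
            // perform the block move here
          } finally {
            source.release(move);
            target.release(move);
          }
        });
      }
    }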



[03/23] hadoop git commit: YARN-5866. Fix few issues reported by jshint in new YARN UI. Contributed by Akhil P B.

YARN-5866. Fix few issues reported by jshint in new YARN UI. Contributed by Akhil P B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c6bae5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c6bae5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c6bae5a

Branch: refs/heads/YARN-5734
Commit: 4c6bae5a223f8e4f7ee302b506bbf4fcb67fb07b
Parents: 87852b6
Author: Sunil G <su...@apache.org>
Authored: Tue Jan 31 14:18:31 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Jan 31 14:18:31 2017 +0530

----------------------------------------------------------------------
 .../hadoop-yarn-ui/src/main/webapp/.jshintrc    |  5 ++-
 .../src/main/webapp/app/adapters/abstract.js    |  1 +
 .../main/webapp/app/adapters/cluster-info.js    |  4 +--
 .../main/webapp/app/adapters/cluster-metric.js  |  4 +--
 .../webapp/app/adapters/yarn-app-attempt.js     |  4 +--
 .../src/main/webapp/app/adapters/yarn-app.js    |  6 ++--
 .../webapp/app/adapters/yarn-container-log.js   |  4 +--
 .../main/webapp/app/adapters/yarn-container.js  | 12 +++----
 .../src/main/webapp/app/adapters/yarn-node.js   |  4 +--
 .../src/main/webapp/app/adapters/yarn-queue.js  |  4 +--
 .../main/webapp/app/adapters/yarn-rm-node.js    |  6 ++--
 .../app/components/app-usage-donut-chart.js     |  6 ++--
 .../src/main/webapp/app/components/bar-chart.js | 36 +++++++++-----------
 .../app/components/base-chart-component.js      |  8 ++---
 .../app/components/base-usage-donut-chart.js    | 11 +++---
 .../main/webapp/app/components/donut-chart.js   | 10 +++---
 .../main/webapp/app/components/nodes-heatmap.js | 21 ++++++------
 ...er-app-memusage-by-nodes-stacked-barchart.js |  4 +--
 ...app-ncontainers-by-nodes-stacked-barchart.js |  4 +--
 .../app/components/queue-usage-donut-chart.js   |  4 +--
 .../main/webapp/app/components/queue-view.js    | 11 +++---
 .../main/webapp/app/components/simple-table.js  |  6 ++--
 .../webapp/app/components/stacked-barchart.js   | 23 +++++++------
 .../main/webapp/app/components/timeline-view.js | 25 ++++++++------
 .../main/webapp/app/components/tree-selector.js | 33 +++++++++---------
 .../main/webapp/app/controllers/application.js  |  8 ++---
 .../src/main/webapp/app/helpers/divide.js       |  2 +-
 .../main/webapp/app/helpers/log-files-comma.js  |  5 ++-
 .../src/main/webapp/app/helpers/node-link.js    |  2 +-
 .../src/main/webapp/app/helpers/node-menu.js    | 10 +++---
 .../src/main/webapp/app/helpers/node-name.js    |  4 +--
 .../src/main/webapp/app/initializers/loader.js  |  8 +++--
 .../main/webapp/app/models/yarn-app-attempt.js  | 17 ++++-----
 .../src/main/webapp/app/models/yarn-app.js      |  8 ++---
 .../src/main/webapp/app/models/yarn-node-app.js |  8 ++---
 .../webapp/app/models/yarn-node-container.js    | 12 +++----
 .../src/main/webapp/app/models/yarn-queue.js    | 14 ++++----
 .../src/main/webapp/app/models/yarn-rm-node.js  | 10 +++---
 .../src/main/webapp/app/models/yarn-user.js     |  2 +-
 .../src/main/webapp/app/routes/application.js   |  3 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  2 --
 .../src/main/webapp/app/routes/yarn-app.js      |  2 +-
 .../webapp/app/routes/yarn-container-log.js     |  2 +-
 .../main/webapp/app/routes/yarn-queues/index.js |  2 ++
 .../webapp/app/serializers/yarn-app-attempt.js  | 23 ++++++-------
 .../src/main/webapp/app/serializers/yarn-app.js | 13 +++----
 .../app/serializers/yarn-container-log.js       |  3 +-
 .../webapp/app/serializers/yarn-container.js    | 22 +++++-------
 .../webapp/app/serializers/yarn-node-app.js     | 24 ++-----------
 .../app/serializers/yarn-node-container.js      |  7 ++--
 .../main/webapp/app/serializers/yarn-node.js    |  8 ++---
 .../main/webapp/app/serializers/yarn-queue.js   | 10 +++---
 .../main/webapp/app/serializers/yarn-rm-node.js |  7 ++--
 .../src/main/webapp/app/utils/color-utils.js    |  6 ++--
 .../src/main/webapp/app/utils/converter.js      |  2 +-
 .../main/webapp/app/utils/href-address-utils.js |  2 --
 .../src/main/webapp/app/utils/mock.js           |  4 +--
 .../src/main/webapp/app/utils/sorter.js         |  8 ++---
 .../src/main/webapp/ember-cli-build.js          |  2 +-
 .../unit/models/yarn-container-log-test.js      |  1 +
 .../tests/unit/models/yarn-node-app-test.js     |  1 +
 .../unit/models/yarn-node-container-test.js     |  1 +
 .../webapp/tests/unit/models/yarn-node-test.js  |  1 +
 .../tests/unit/models/yarn-rm-node-test.js      |  1 +
 .../unit/routes/yarn-container-log-test.js      | 24 ++++++-------
 .../tests/unit/routes/yarn-node-app-test.js     | 28 +++++++--------
 .../tests/unit/routes/yarn-node-apps-test.js    | 30 ++++++++--------
 .../unit/routes/yarn-node-container-test.js     | 28 +++++++--------
 .../unit/routes/yarn-node-containers-test.js    | 30 ++++++++--------
 .../webapp/tests/unit/routes/yarn-node-test.js  |  4 +--
 .../webapp/tests/unit/routes/yarn-nodes-test.js |  2 +-
 71 files changed, 313 insertions(+), 356 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.jshintrc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.jshintrc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.jshintrc
index 08096ef..63cfe40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.jshintrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.jshintrc
@@ -2,7 +2,10 @@
   "predef": [
     "document",
     "window",
-    "-Promise"
+    "-Promise",
+    "d3",
+    "$",
+    "moment"
   ],
   "browser": true,
   "boss": true,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
index c7e5c36..0db27a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/abstract.js
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 import Ember from 'ember';
+import DS from 'ember-data';
 
 export default DS.JSONAPIAdapter.extend({
   address: null, //Must be set by inheriting classes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-info.js
index f79df92..0665499 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-info.js
@@ -24,7 +24,7 @@ export default AbstractAdapter.extend({
   serverName: "RM",
 
   // Any cluster-info specific adapter changes must be added here
-  pathForType(modelName) {
+  pathForType(/*modelName*/) {
     return ''; // move to some common place, return path by modelname.
-  },
+  }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-metric.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-metric.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-metric.js
index 8325a4c..f431340 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-metric.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-metric.js
@@ -24,7 +24,7 @@ export default AbstractAdapter.extend({
   serverName: "RM",
 
   // Any cluster-metric specific adapter changes must be added here
-  pathForType(modelName) {
+  pathForType(/*modelName*/) {
     return ''; // move to some common place, return path by modelname.
-  },
+  }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-attempt.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-attempt.js
index 78f5e02..2389f16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app-attempt.js
@@ -24,12 +24,12 @@ export default AbstractAdapter.extend({
   restNameSpace: "cluster",
   serverName: "RM",
 
-  urlForQuery(query, modelName) {
+  urlForQuery(query/*, modelName*/) {
     var url = this._buildURL();
     return url + '/apps/' + query.appId + "/appattempts";
   },
 
-  urlForFindRecord(id, modelName, snapshot) {
+  urlForFindRecord(id/*, modelName, snapshot*/) {
     var url = this._buildURL();
     return url + '/apps/' +
            Converter.attemptIdToAppId(id) + "/appattempts/" + id;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
index 67a2847..b34c606 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
@@ -23,7 +23,7 @@ export default AbstractAdapter.extend({
   restNameSpace: "cluster",
   serverName: "RM",
 
-  urlForQuery(query, modelName) {
+  urlForQuery(query/*, modelName*/) {
     var url = this._buildURL();
     if (query.state) {
       url = url + '/apps/?state=' + query.state;
@@ -31,13 +31,13 @@ export default AbstractAdapter.extend({
     return url;
   },
 
-  urlForFindRecord(id, modelName, snapshot) {
+  urlForFindRecord(id/*, modelName, snapshot*/) {
     var url = this._buildURL();
     url = url + '/apps/' + id;
     return url;
   },
 
-  pathForType(modelName) {
+  pathForType(/*modelName*/) {
     return 'apps'; // move to some common place, return path by modelname.
   },
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
index 9f2d5d7..8d1b12b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js
@@ -37,7 +37,7 @@ export default DS.RESTAdapter.extend({
     return this.get(`env.app.namespaces.node`);
   }),
 
-  urlForFindRecord(id, modelName, snapshot) {
+  urlForFindRecord(id/*, modelName, snapshot*/) {
     var splits = Converter.splitForContainerLogs(id);
     var nodeHttpAddr = splits[0];
     var containerId = splits[1];
@@ -68,7 +68,7 @@ export default DS.RESTAdapter.extend({
     hash.context = this;
 
     var headers = Ember.get(this, 'headers');
-    if (headers != undefined) {
+    if (headers !== undefined) {
       hash.beforeSend = function (xhr) {
         Object.keys(headers).forEach(function (key) {
           return xhr.setRequestHeader(key, headers[key]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container.js
index fd6a6f8..348a805 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container.js
@@ -25,14 +25,14 @@ export default DS.JSONAPIAdapter.extend({
   },
 
   host: function() {
-    return undefined
+    return undefined;
   }.property(),
 
   namespace: function() {
-    return undefined
+    return undefined;
   }.property(),
 
-  urlForQuery(query, modelName) {
+  urlForQuery(query/*, modelName*/) {
     var rmHosts = this.get(`hosts.rmWebAddress`);
     var tsHosts = this.get(`hosts.timelineWebAddress`);
     var rmNamespaces = this.get(`env.app.namespaces.cluster`);
@@ -47,8 +47,8 @@ export default DS.JSONAPIAdapter.extend({
     }
 
     var url = this._buildURL();
-    url = url + '/apps/' + Converter.attemptIdToAppId(query.app_attempt_id) 
-               + "/appattempts/" + query.app_attempt_id + "/containers";
+    url = url + '/apps/' + Converter.attemptIdToAppId(query.app_attempt_id) +
+          "/appattempts/" + query.app_attempt_id + "/containers";
     console.log(url);
     return url;
   },
@@ -58,6 +58,6 @@ export default DS.JSONAPIAdapter.extend({
     hash.crossDomain = true;
     hash.xhrFields = {withCredentials: true};
     hash.targetServer = "RM";
-    return this._super(url, method, hash); 
+    return this._super(url, method, hash);
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-node.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-node.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-node.js
index 5bcfc9a..1436bc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-node.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-node.js
@@ -24,10 +24,10 @@ export default AbstractAdapter.extend({
   restNameSpace: "node",
   serverName: "NM",
 
-  urlForFindRecord(id, modelName, snapshot) {
+  urlForFindRecord(id/*, modelName, snapshot*/) {
     var url = this._buildURL();
     url = url.replace("{nodeAddress}", id);
     return url;
-  },
+  }
 
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
index 41cd442..f2017df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
@@ -23,8 +23,8 @@ export default AbstractAdapter.extend({
   restNameSpace: "cluster",
   serverName: "RM",
 
-  pathForType(modelName) {
+  pathForType(/*modelName*/) {
     return 'scheduler'; // move to some common place, return path by modelname.
-  },
+  }
 
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-rm-node.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-rm-node.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-rm-node.js
index a24c399..6783ded 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-rm-node.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-rm-node.js
@@ -23,14 +23,14 @@ export default AbstractAdapter.extend({
   restNameSpace: "cluster",
   serverName: "RM",
 
-  pathForType(modelName) {
+  pathForType(/*modelName*/) {
     return 'nodes';
   },
 
-  urlForFindRecord(id, modelName, snapshot) {
+  urlForFindRecord(id/*, modelName, snapshot*/) {
     var url = this._buildURL();
     url = url + "/nodes/" + id;
     return url;
-  },
+  }
 
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
index 90f41fc..274c219 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
@@ -16,8 +16,6 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
-import DonutChart from 'yarn-ui/components/donut-chart';
 import BaseUsageDonutChart from 'yarn-ui/components/base-usage-donut-chart';
 import ColorUtils from 'yarn-ui/utils/color-utils';
 import HrefAddressUtils from 'yarn-ui/utils/href-address-utils';
@@ -52,11 +50,11 @@ export default BaseUsageDonutChart.extend({
     usageByApps.push({
       label: "Available",
       value: avail.toFixed(4)
-    })
+    });
 
     this.colors = ColorUtils.getColors(usageByApps.length, ["others", "good"], true);
 
     this.renderDonutChart(usageByApps, this.get("title"), this.get("showLabels"),
       this.get("middleLabel"), "100%", "%");
   },
-})
\ No newline at end of file
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
index a5c49a9..05d78eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/bar-chart.js
@@ -55,17 +55,15 @@ export default BaseChartComponent.extend({
       .domain([0, maxValue])
       .range([0, maxBarWidth]);
 
+    var getBarText = function(i) {
+      return data[i].label;
+    };
     // show bar text
-    for (var i = 0; i < data.length; i++) {
+    for (i = 0; i < data.length; i++) {
       g.append("text")
-        .text(
-          function() {
-            return data[i].label;
-          })
-        .attr("y", function() {
-          return layout.y1 + singleBarHeight / 2 + layout.margin + (gap +
-            singleBarHeight) * i + 30;
-        })
+        .text(getBarText(i))
+        .attr("y", layout.y1 + singleBarHeight / 2 + layout.margin +
+          (gap + singleBarHeight) * i + 30)
         .attr("x", layout.x1 + layout.margin);
     }
 
@@ -96,17 +94,15 @@ export default BaseChartComponent.extend({
         return w;
       });
 
+    var getBarValue = function(i) {
+      return data[i].value;
+    };
     // show bar value
-    for (var i = 0; i < data.length; i++) {
+    for (i = 0; i < data.length; i++) {
       g.append("text")
-        .text(
-          function() {
-            return data[i].value;
-          })
-        .attr("y", function() {
-          return layout.y1 + singleBarHeight / 2 + layout.margin + (gap +
-            singleBarHeight) * i + 30;
-        })
+        .text(getBarValue(i))
+        .attr("y", layout.y1 + singleBarHeight / 2 + layout.margin +
+              (gap + singleBarHeight) * i + 30)
         .attr("x", layout.x1 + layout.margin + textWidth + 15 + xScaler(data[i].value));
     }
   },
@@ -123,5 +119,5 @@ export default BaseChartComponent.extend({
   didInsertElement: function() {
     this.initChart();
     this.draw();
-  },
-})
\ No newline at end of file
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
index d221488..d11a532 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
@@ -96,7 +96,7 @@ export default Ember.Component.extend({
   },
 
   bindTooltip: function(d) {
-    d.on("mouseover", function(d) {
+    d.on("mouseover", function() {
         this.tooltip
           .style("left", (d3.event.pageX) + "px")
           .style("top", (d3.event.pageY - 28) + "px");
@@ -108,16 +108,16 @@ export default Ember.Component.extend({
           data = d.data;
         }
 
-        this.tooltip.style("opacity", .9);
+        this.tooltip.style("opacity", 0.9);
         var value = data.value;
-        if (this.get("type") == "memory") {
+        if (this.get("type") === "memory") {
           value = Converter.memoryToSimpliedUnit(value);
         }
         this.tooltip.html(data.label + " = " + value)
           .style("left", (d3.event.pageX) + "px")
           .style("top", (d3.event.pageY - 28) + "px");
       }.bind(this))
-      .on("mouseout", function(d) {
+      .on("mouseout", function() {
         this.tooltip.style("opacity", 0);
       }.bind(this));
   },

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-usage-donut-chart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-usage-donut-chart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-usage-donut-chart.js
index bec06c9..201ae6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-usage-donut-chart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-usage-donut-chart.js
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
 import DonutChart from 'yarn-ui/components/donut-chart';
 
 export default DonutChart.extend({
@@ -29,15 +28,15 @@ export default DonutChart.extend({
     var others = {
       label: "Used by others",
       value: 0
-    }
+    };
 
-    for (var i = nItemsKept; i < usages.length; i++) {
+    for (i = nItemsKept; i < usages.length; i++) {
       others.value += Number(usages[i].value);
     }
     others.value = others.value.toFixed(2);
 
-    arr.push(others)
+    arr.push(others);
 
     return arr;
-  },
-})
\ No newline at end of file
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
index a2a21b3..e5699b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
@@ -41,7 +41,7 @@ export default BaseChartComponent.extend({
     }
 
     if (!middleValue) {
-      if (this.get("type") == "memory") {
+      if (this.get("type") === "memory") {
         middleValue = Converter.memoryToSimpliedUnit(total);
       } else {
         middleValue = total;
@@ -111,7 +111,7 @@ export default BaseChartComponent.extend({
         if (allZero) {
           return this.colors[i];
         }
-      }.bind(this))
+      }.bind(this));
     this.bindTooltip(path);
     path.on("click", function (d) {
       var data = d.data;
@@ -119,7 +119,7 @@ export default BaseChartComponent.extend({
         this.tooltip.remove();
         document.location.href = data.link;
       }
-    }.bind(this))
+    }.bind(this));
 
     // Show labels
     if (showLabels) {
@@ -147,7 +147,7 @@ export default BaseChartComponent.extend({
         })
         .text(function(d) {
           var value = d.value;
-          if (this.get("type") == "memory") {
+          if (this.get("type") === "memory") {
             value = Converter.memoryToSimpliedUnit(value);
           }
           return d.label + ' = ' + value + suffix;
@@ -190,4 +190,4 @@ export default BaseChartComponent.extend({
     this.initChart();
     this.draw();
   },
-})
\ No newline at end of file
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
index af8ceb3..5652834 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
@@ -17,7 +17,6 @@
  */
 
 import BaseChartComponent from 'yarn-ui/components/base-chart-component';
-import Mock from 'yarn-ui/utils/mock';
 
 export default BaseChartComponent.extend({
   CELL_WIDTH: 250,
@@ -39,7 +38,7 @@ export default BaseChartComponent.extend({
         // Handle pie chart case
         var text = element.attr("tooltiptext");
 
-        this.tooltip.style("opacity", .9);
+        this.tooltip.style("opacity", 0.9);
         this.tooltip.html(text)
           .style("left", (d3.event.pageX) + "px")
           .style("top", (d3.event.pageY - 28) + "px");
@@ -82,10 +81,10 @@ export default BaseChartComponent.extend({
       2 * this.CELL_MARGIN;
     var sampleYOffset = layout.margin * 2;
 
-    for (var i = 1; i <= 5; i++) {
+    for (i = 1; i <= 5; i++) {
       var ratio = i * 0.2 - 0.1;
 
-      var rect = g.append("rect")
+      g.append("rect")
         .attr("x", sampleXOffset)
         .attr("y", sampleYOffset)
         .attr("fill", colorFunc(ratio))
@@ -101,14 +100,14 @@ export default BaseChartComponent.extend({
 
     var chartXOffset = -1;
 
-    for (var i = 0; i < racksArray.length; i++) {
+    for (i = 0; i < racksArray.length; i++) {
       var text = g.append("text")
         .text(racksArray[i])
         .attr("y", yOffset + this.CELL_HEIGHT / 2 + 5)
         .attr("x", layout.margin)
         .attr("class", "heatmap-rack");
 
-      if (-1 == chartXOffset) {
+      if (-1 === chartXOffset) {
         chartXOffset = layout.margin + text.node().getComputedTextLength() + 30;
       }
 
@@ -118,10 +117,10 @@ export default BaseChartComponent.extend({
         var rack = data[j].get("rack");
         var host = data[j].get("nodeHostName");
 
-        if (rack == racksArray[i]) {
+        if (rack === racksArray[i]) {
           if (!rack.includes(this.filter) && !host.includes(this.filter)) {
             this.addNode(g, xOffset, yOffset, colorFunc, data[j], false);
-            var text = g.append("text")
+            g.append("text")
               .text(host)
               .attr("y", yOffset + this.CELL_HEIGHT / 2 + 5)
               .attr("x", xOffset + this.CELL_WIDTH / 2)
@@ -151,7 +150,7 @@ export default BaseChartComponent.extend({
         xOffset += this.CELL_MARGIN + this.CELL_WIDTH;
       }
 
-      if (xOffset != chartXOffset) {
+      if (xOffset !== chartXOffset) {
         xOffset = chartXOffset;
         yOffset += this.CELL_MARGIN + this.CELL_HEIGHT;
       }
@@ -182,7 +181,7 @@ export default BaseChartComponent.extend({
   },
 
   addPlaceholderNode: function(g, xOffset, yOffset) {
-    var rect = g.append("rect")
+    g.append("rect")
       .attr("y", yOffset)
       .attr("x", xOffset)
       .attr("height", this.CELL_HEIGHT)
@@ -206,4 +205,4 @@ export default BaseChartComponent.extend({
       this.didInsertElement();
     }
   }
-})
\ No newline at end of file
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js
index 7feb7bb..65cbaf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js
@@ -28,7 +28,7 @@ export default StackedBarchart.extend({
       {
         used: Number(n.get("usedMemoryMB")),
         avail: Number(n.get("availMemoryMB"))
-      }
+      };
     });
 
     containers.forEach(function(c) {
@@ -85,4 +85,4 @@ export default StackedBarchart.extend({
       data, this.get("title"), ["Used by this app", "Used by other apps",
         "Available"]);
   },
-})
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-ncontainers-by-nodes-stacked-barchart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-ncontainers-by-nodes-stacked-barchart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-ncontainers-by-nodes-stacked-barchart.js
index 251f557..4e45052 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-ncontainers-by-nodes-stacked-barchart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-ncontainers-by-nodes-stacked-barchart.js
@@ -29,7 +29,7 @@ export default StackedBarchart.extend({
     containers.forEach(function(c) {
       var nodeId = c.get("assignedNodeId");
       var n = nodeToContainers[nodeId];
-      if (undefined != n) {
+      if (undefined !== n) {
         nodeToContainers[nodeId] += 1;
       }
     });
@@ -64,4 +64,4 @@ export default StackedBarchart.extend({
     this.show(
       data, this.get("title"), ["Running containers from this app"]);
   },
-})
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-usage-donut-chart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-usage-donut-chart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-usage-donut-chart.js
index f5e7574..c939aaf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-usage-donut-chart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-usage-donut-chart.js
@@ -16,8 +16,6 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
-import DonutChart from 'yarn-ui/components/donut-chart';
 import BaseUsageDonutChart from 'yarn-ui/components/base-usage-donut-chart';
 import ColorUtils from 'yarn-ui/utils/color-utils';
 import HrefAddressUtils from 'yarn-ui/utils/href-address-utils';
@@ -66,4 +64,4 @@ export default BaseUsageDonutChart.extend({
     this.initChart();
     this.draw();
   },
-})
\ No newline at end of file
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-view.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-view.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-view.js
index adedf9a..f5fb68d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-view.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-view.js
@@ -92,7 +92,7 @@ export default Ember.Component.extend(ChartUtilsMixin, {
     circle.on('mouseover', function () {
     }.bind(this));
     circle.on('mouseout', function () {
-      if (circle != this.queues.selectedQueueCircle) {
+      if (circle !== this.queues.selectedQueueCircle) {
         circle.style("fill", this.queueColors[0]);
       }
     }.bind(this));
@@ -143,7 +143,6 @@ export default Ember.Component.extend(ChartUtilsMixin, {
     // render queues
     this.queues.dataGroup = this.canvas.svg.append("g")
       .attr("id", "queues-g");
-    var rootQueue = undefined;
 
     if (this.queues.data) {
       this.renderQueue(this.queues.data['root'], 0, 0);
@@ -185,7 +184,7 @@ export default Ember.Component.extend(ChartUtilsMixin, {
   /*
    * data = [{label="xx", value=},{...}]
    */
-  renderTable: function (data, title, layout) {
+  renderTable: function (data) {
     d3.select("#main-svg")
       .append('table')
       .selectAll('tr')
@@ -254,7 +253,7 @@ export default Ember.Component.extend(ChartUtilsMixin, {
     for (var queueName in this.queues.data) {
       var q = this.queues.data[queueName];
       if ((!q.get("children")) || q.get("children")
-          .length == 0) {
+          .length === 0) {
         // it's a leafqueue
         leafQueueUsedCaps.push({
           label: q.get("name"),
@@ -274,10 +273,10 @@ export default Ember.Component.extend(ChartUtilsMixin, {
     var queue = this.queues.data[queueName];
     var idx = 0;
 
-    if (queue.get("name") == "root") {
+    if (queue.get("name") === "root") {
       this.renderLeafQueueUsedCapacities(this.getLayout(idx++));
     }
-    if (queue.get("name") != "root") {
+    if (queue.get("name") !== "root") {
       this.renderQueueCapacities(queue, this.getLayout(idx++));
     }
     if (queue.get("children") && queue.get("children")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/simple-table.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/simple-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/simple-table.js
index 359583d..c5dadc3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/simple-table.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/simple-table.js
@@ -48,11 +48,11 @@ export default Ember.Component.extend({
       var cols = this.get("colsOrder").split(' ');
       for (i = 0; i < cols.length; i++) {
         var col = cols[i].split(',');
-        if (col.length != 2) {
+        if (col.length !== 2) {
           continue;
         }
         var order = col[1].trim();
-        if (order != 'asc' && order != 'desc') {
+        if (order !== 'asc' && order !== 'desc') {
           continue;
         }
         var colOrder = [];
@@ -61,7 +61,7 @@ export default Ember.Component.extend({
         orderArr.push(colOrder);
       }
     }
-    if (orderArr.length == 0) {
+    if (orderArr.length === 0) {
       var defaultOrder = [0, 'asc'];
       orderArr.push(defaultOrder);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/stacked-barchart.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/stacked-barchart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/stacked-barchart.js
index 4a121fe..e57d747 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/stacked-barchart.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/stacked-barchart.js
@@ -17,7 +17,6 @@
  */
 
 import BaseChartComponent from 'yarn-ui/components/base-chart-component';
-import Mock from 'yarn-ui/utils/mock';
 
 export default BaseChartComponent.extend({
   MAX_BAR_HEIGHT: 120,
@@ -37,7 +36,7 @@ export default BaseChartComponent.extend({
         // Handle pie chart case
         var text = element.attr("tooltiptext");
 
-        this.tooltip.style("opacity", .9);
+        this.tooltip.style("opacity", 0.9);
         this.tooltip.html(text)
           .style("left", (d3.event.pageX) + "px")
           .style("top", (d3.event.pageY - 28) + "px");
@@ -99,9 +98,13 @@ export default BaseChartComponent.extend({
 
     var maxValue = 0;
     var maxN = 0;
-    for (var i = 0; i < data.length; i++) {
+
+    var i = 0;
+    var j = 0;
+
+    for (i = 0; i < data.length; i++) {
       var total = 0;
-      for (var j = 0; j < data[i].length; j++) {
+      for (j = 0; j < data[i].length; j++) {
         total += data[i][j].value;
       }
 
@@ -121,14 +124,14 @@ export default BaseChartComponent.extend({
       return b[0].value - a[0].value;
     });
 
-    for (var i = 0; i < data.length; i++) {
-      if (i % nBarPerRow == 0) {
+    for (i = 0; i < data.length; i++) {
+      if (i % nBarPerRow === 0) {
         xOffset = layout.margin;
         yOffset += layout.margin + height;
       }
 
       var leftTopY = yOffset;
-      for (var j = 0; j < data[i].length; j++) {
+      for (j = 0; j < data[i].length; j++) {
         var dy = data[i][j].value * height / maxValue;
         if (dy > 0) {
           leftTopY = leftTopY - dy;
@@ -148,7 +151,7 @@ export default BaseChartComponent.extend({
         }
       }
 
-      if (data[i].length == 1) {
+      if (data[i].length === 1) {
         g.append("text")
           .text(data[i][0].value)
           .attr("y", leftTopY - 10)
@@ -165,7 +168,7 @@ export default BaseChartComponent.extend({
     this.renderTitleAndBG(g, title, layout, false);
   },
 
-  draw: function(data, title, textWidth) {
+  draw: function() {
     this.initChart(true);
     //Mock.initMockNodesData(this);
 
@@ -195,4 +198,4 @@ export default BaseChartComponent.extend({
       this.didInsertElement();
     }
   }
-})
\ No newline at end of file
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
index 516b114..d730a43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
@@ -41,13 +41,15 @@ export default Ember.Component.extend({
   }.property(),
 
   setSelected: function(d) {
-    if (this._selected == d) {
+    var dom;
+
+    if (this._selected === d) {
       return;
     }
 
     // restore color
     if (this._selected) {
-      var dom = d3.select("#timeline-bar-" + this._selected.get("id"));
+      dom = d3.select("#timeline-bar-" + this._selected.get("id"));
       dom.attr("fill", this.colors[0]);
     }
 
@@ -164,19 +166,19 @@ export default Ember.Component.extend({
       .attr("y", function(d, i) {
         return border + (gap + singleBarHeight) * i;
       })
-      .attr("x", function(d, i) {
+      .attr("x", function(d) {
         return border + textWidth + xScaler(d.get("startTs"));
       })
       .attr("height", singleBarHeight)
-      .attr("fill", function(d, i) {
+      .attr("fill", function() {
         return this.colors[0];
       }.bind(this))
-      .attr("width", function(d, i) {
+      .attr("width", function(d) {
         var finishedTs = xScaler(d.get("finishedTs"));
         finishedTs = finishedTs > 0 ? finishedTs : xScaler(end);
         return finishedTs - xScaler(d.get("startTs"));
       })
-      .attr("id", function(d, i) {
+      .attr("id", function(d) {
         return "timeline-bar-" + d.get("id");
       });
     bar.on("click", function(d) {
@@ -198,18 +200,18 @@ export default Ember.Component.extend({
   },
 
   bindTooltip: function(d) {
-    d.on("mouseover", function(d) {
+    d.on("mouseover", function() {
         this.tooltip
           .style("left", (d3.event.pageX) + "px")
           .style("top", (d3.event.pageY - 28) + "px");
       }.bind(this))
       .on("mousemove", function(d) {
-        this.tooltip.style("opacity", .9);
+        this.tooltip.style("opacity", 0.9);
         this.tooltip.html(d.get("tooltipLabel"))
           .style("left", (d3.event.pageX) + "px")
           .style("top", (d3.event.pageY - 28) + "px");
       }.bind(this))
-      .on("mouseout", function(d) {
+      .on("mouseout", function() {
         this.tooltip.style("opacity", 0);
       }.bind(this));
   },
@@ -244,7 +246,7 @@ export default Ember.Component.extend({
       }.bind(this));
     }
 
-    if(this.modelArr.length == 0) {
+    if(this.modelArr.length === 0) {
       return;
     }
 
@@ -254,8 +256,9 @@ export default Ember.Component.extend({
 
       return tsA - tsB;
     });
+    var begin = 0;
     if (this.modelArr.length > 0) {
-      var begin = this.modelArr[0].get("startTs");
+      begin = this.modelArr[0].get("startTs");
     }
     var end = 0;
     for (var i = 0; i < this.modelArr.length; i++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index 5e7cfa0..c9e735d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -51,7 +51,7 @@ export default Ember.Component.extend({
         this.map[o.id] = o;
       }.bind(this));
 
-    var selected = this.get("selected");
+    // var selected = this.get("selected");
 
     this.initQueue("root", 1, this.treeData);
   },
@@ -125,9 +125,9 @@ export default Ember.Component.extend({
     // Enter any new nodes at the parent's previous position.
     var nodeEnter = node.enter().append("g")
       .attr("class", "node")
-      .attr("transform", function(d) { return "translate(" + source.y0 + "," + source.x0 + ")"; })
-      .on("mouseover", function(d,i){
-        if (d.queueData.get("name") != this.get("selected")) {
+      .attr("transform", function() { return "translate(" + source.y0 + "," + source.x0 + ")"; })
+      .on("mouseover", function(d){
+        if (d.queueData.get("name") !== this.get("selected")) {
             document.location.href = "#/yarn-queues/" + d.queueData.get("name");
         }
 
@@ -161,10 +161,10 @@ export default Ember.Component.extend({
 
     // append percentage
     nodeEnter.append("text")
-      .attr("x", function(d) { return 0; })
+      .attr("x", function() { return 0; })
       .attr("dy", ".35em")
       .attr("fill", "white")
-      .attr("text-anchor", function(d) { return "middle"; })
+      .attr("text-anchor", function() { return "middle"; })
       .text(function(d) {
         var usedCap = d.queueData.get("usedCapacity");
         if (usedCap >= 100.0) {
@@ -195,14 +195,14 @@ export default Ember.Component.extend({
           return "#/yarn-queues/" + d.queueData.get("name");
         })
       .style("stroke-width", function(d) {
-        if (d.queueData.get("name") == this.get("selected")) {
+        if (d.queueData.get("name") === this.get("selected")) {
           return 7;
         } else {
           return 2;
         }
       }.bind(this))
       .style("stroke", function(d) {
-        if (d.queueData.get("name") == this.get("selected")) {
+        if (d.queueData.get("name") === this.get("selected")) {
           return "gray";
         } else {
           return "gray";
@@ -215,7 +215,7 @@ export default Ember.Component.extend({
     // Transition exiting nodes to the parent's new position.
     var nodeExit = node.exit().transition()
       .duration(duration)
-      .attr("transform", function(d) { return "translate(" + source.y + "," + source.x + ")"; })
+      .attr("transform", function() { return "translate(" + source.y + "," + source.x + ")"; })
       .remove();
 
     nodeExit.select("circle")
@@ -231,9 +231,9 @@ export default Ember.Component.extend({
     // Enter any new links at the parent's previous position.
     link.enter().insert("path", "g")
       .attr("class", "link")
-      .attr("d", function(d) {
-      var o = {x: source.x0, y: source.y0};
-      return diagonal({source: o, target: o});
+      .attr("d", function() {
+        var o = {x: source.x0, y: source.y0};
+        return diagonal({source: o, target: o});
       });
 
     // Transition links to their new position.
@@ -244,9 +244,9 @@ export default Ember.Component.extend({
     // Transition exiting nodes to the parent's new position.
     link.exit().transition()
       .duration(duration)
-      .attr("d", function(d) {
-      var o = {x: source.x, y: source.y};
-      return diagonal({source: o, target: o});
+      .attr("d", function() {
+        var o = {x: source.x, y: source.y};
+        return diagonal({source: o, target: o});
       })
       .remove();
 
@@ -265,7 +265,6 @@ export default Ember.Component.extend({
     var treeHeight = this.numOfLeafQueue * INBETWEEN_HEIGHT;
     var width = treeWidth + margin.left + margin.right;
     var height = treeHeight + margin.top + margin.bottom;
-    var layout = { };
 
     if (this.mainSvg) {
       this.mainSvg.remove();
@@ -287,7 +286,7 @@ export default Ember.Component.extend({
     root.x0 = height / 2;
     root.y0 = 0;
 
-    d3.select(self.frameElement).style("height", height);
+    d3.select(window.frameElement).style("height", height);
 
     this.update(root, root, tree, diagonal);
   },

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
index 2effb13..08ca5a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
@@ -29,22 +29,22 @@ export default Ember.Controller.extend({
   outputMainMenu: function(){
     var path = this.get('currentPath');
     var html = '<li';
-    if (path == 'yarn-queue') {
+    if (path === 'yarn-queue') {
       html = html + ' class="active"';
     }
     html = html + '><a href="yarn-queue/root">Queues<span class="sr-only">' +
         '(current)</span></a></li><li';
-    if (path.lastIndexOf('yarn-app', 0) == 0) {
+    if (path.lastIndexOf('yarn-app', 0) === 0) {
       html = html + ' class="active"';
     }
     html = html + '><a href="yarn-apps">Applications<span class="sr-only">' +
         '(current)</span></a></li><li';
-    if (path == 'cluster-overview') {
+    if (path === 'cluster-overview') {
       html = html + ' class="active"';
     }
     html = html + '><a href="cluster-overview">Cluster Overview<span class=' +
         '"sr-only">(current)</span></a></li><li';
-    if (path.lastIndexOf('yarn-node', 0) == 0) {
+    if (path.lastIndexOf('yarn-node', 0) === 0) {
       html = html + ' class="active"';
     }
     html = html + '><a href="yarn-nodes">Nodes<span class="sr-only">' +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/divide.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/divide.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/divide.js
index fcf64dd..437def8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/divide.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/divide.js
@@ -24,7 +24,7 @@ import Ember from 'ember';
 export default Ember.Helper.helper(function(params,hash) {
   var num = hash.num;
   var den = hash.den;
-  if (den == 0) {
+  if (den === 0) {
     return 0;
   }
   return Math.floor(num/den);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
index 192e1ed..78dcf25 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
@@ -27,19 +27,18 @@ export default Ember.Helper.helper(function(params,hash) {
     return "";
   }
   var logFilesLen = logFiles.length;
-  if (logFilesLen == 0) {
+  if (logFilesLen === 0) {
     return "";
   }
   var nodeId = hash.nodeId;
   var nodeAddr = hash.nodeAddr;
   var containerId = hash.containerId;
   var html = '<td>';
-  var logFilesCommaSeparated = "";
   for (var i = 0; i < logFilesLen; i++) {
     html = html + '<a href="#/yarn-container-log/' + nodeId + '/' +
         nodeAddr + '/' + containerId + '/' + logFiles[i] + '">' + logFiles[i] +
         '</a>';
-    if (i != logFilesLen - 1) {
+    if (i !== logFilesLen - 1) {
       html = html + ",";
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-link.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-link.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-link.js
index e524f08..d71ac77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-link.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-link.js
@@ -26,7 +26,7 @@ export default Ember.Helper.helper(function(params,hash) {
   var nodeHTTPAddress = hash.nodeHTTPAddress;
   var nodeId = hash.nodeId;
   var html = '<td>';
-  if (nodeState == "SHUTDOWN" || nodeState == "LOST") {
+  if (nodeState === "SHUTDOWN" || nodeState === "LOST") {
     html = html + nodeHTTPAddress;
   } else {
     html = html + '<a href="#/yarn-node/' + nodeId + "/" + nodeHTTPAddress + '">' +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
index d4a73a4..e1eba5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-menu.js
@@ -25,7 +25,7 @@ export default Ember.Helper.helper(function(params,hash) {
   // Place a menu within a panel inside col-md-2 container.
   var nodeIdSplitAtPort = hash.nodeId;
   var portIndex = nodeIdSplitAtPort.indexOf(':');
-  if (portIndex != -1) {
+  if (portIndex !== -1) {
     nodeIdSplitAtPort = nodeIdSplitAtPort.substring(0, portIndex) +
         ':&#8203;' + nodeIdSplitAtPort.substring(portIndex + 1);
   }
@@ -35,7 +35,7 @@ export default Ember.Helper.helper(function(params,hash) {
     var len = splitsAlongDots.length;
     for (var i = 0; i < len; i++) {
       normalizedNodeId = normalizedNodeId + splitsAlongDots[i];
-      if (i != len - 1) {
+      if (i !== len - 1) {
         normalizedNodeId = normalizedNodeId + '.&#8203;';
       }
     }
@@ -47,17 +47,17 @@ export default Ember.Helper.helper(function(params,hash) {
       '<div class="panel-heading"><h4>Node Manager<br>(' + normalizedNodeId + ')</h4></div>'+
       '<div class="panel-body"><ul class="nav nav-pills nav-stacked" id="stacked-menu">' +
       '<ul class="nav nav-pills nav-stacked collapse in"><li';
-  if (hash.path == 'yarn-node') {
+  if (hash.path === 'yarn-node') {
     html = html + ' class="active"';
   }
   html = html + '><a href="#/yarn-node/' + hash.nodeId + '/' + hash.nodeAddr +
       '">Node Information</a></li><li';
-  if (hash.path == 'yarn-node-apps') {
+  if (hash.path === 'yarn-node-apps') {
     html = html + ' class="active"';
   }
   html = html + '><a href="#/yarn-node-apps/' + hash.nodeId + '/' + hash.nodeAddr +
       '">List of Applications</a></li><li';
-  if (hash.path == 'yarn-node-containers') {
+  if (hash.path === 'yarn-node-containers') {
     html = html + ' class="active"';
   }
   html = html + '><a href="#/yarn-node-containers/' +hash.nodeId + '/' + hash.nodeAddr +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-name.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-name.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-name.js
index 56ce373..75bc017 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-name.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/node-name.js
@@ -23,7 +23,7 @@ export function nodeName(params/*, hash*/) {
   console.log('nodes-uid', params[0]);
   var nodeIdSplitAtPort = params[0];
   var portIndex = nodeIdSplitAtPort.indexOf(':');
-  if (portIndex != -1) {
+  if (portIndex !== -1) {
     nodeIdSplitAtPort = nodeIdSplitAtPort.substring(0, portIndex) +
         ':&#8203;' + nodeIdSplitAtPort.substring(portIndex + 1);
   }
@@ -33,7 +33,7 @@ export function nodeName(params/*, hash*/) {
     var len = splitsAlongDots.length;
     for (var i = 0; i < len; i++) {
       normalizedNodeId = normalizedNodeId + splitsAlongDots[i];
-      if (i != len - 1) {
+      if (i !== len - 1) {
         normalizedNodeId = normalizedNodeId + '.&#8203;';
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 08e4dbd..aa8fb07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -16,8 +16,11 @@
  * limitations under the License.
  */
 
+/* globals ENV: true */
 
-function getTimeLineURL(parameters) {
+import Ember from 'ember';
+
+function getTimeLineURL() {
   return '/conf?name=yarn.timeline-service.webapp.address';
 }
 
@@ -37,7 +40,6 @@ function updateConfigs(application) {
   }
 
   if(!ENV.hosts.timelineWebAddress) {
-    var result = [];
     var timelinehost = "";
     $.ajax({
       type: 'GET',
@@ -54,7 +56,7 @@ function updateConfigs(application) {
 
         Ember.Logger.log("Timeline Address from RM:" + address + ":" + port);
 
-        if(address == "0.0.0.0" || address == "localhost") {
+        if(address === "0.0.0.0" || address === "localhost") {
           var updatedAddress =  hostname + ":" + port;
 
           /* Timeline v2 is not supporting CORS, so make as default*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
index f30d143..c83eb2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+import Ember from 'ember';
 import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
@@ -45,8 +46,8 @@ export default DS.Model.extend({
   attemptStartedTime: function() {
     var startTime = this.get("startTime");
     // If startTime variable is not present, get from startedTime
-    if (startTime == undefined ||
-      startTime == "Invalid date") {
+    if (startTime === undefined ||
+      startTime === "Invalid date") {
       startTime = this.get("startedTime");
     }
 
@@ -80,7 +81,7 @@ export default DS.Model.extend({
   appMasterContainerId: function() {
     var id = this.get("containerId");
     // If containerId variable is not present, get from amContainerId
-    if (id == undefined) {
+    if (id === undefined) {
       id = this.get("amContainerId");
     }
     return id;
@@ -89,16 +90,16 @@ export default DS.Model.extend({
   IsAmNodeUrl: function() {
     var url = this.get("nodeHttpAddress");
       // If nodeHttpAddress variable is not present, hardcode it.
-    if (url == undefined) {
+    if (url === undefined) {
       url = "Not Available";
     }
-    return url != "Not Available";
+    return url !== "Not Available";
   }.property("nodeHttpAddress"),
 
   amNodeId : function() {
     var id = this.get("nodeId");
     // If nodeId variable is not present, get from host
-    if (id == undefined) {
+    if (id === undefined) {
       id = this.get("hosts");
     }
     return id;
@@ -107,10 +108,10 @@ export default DS.Model.extend({
   IsLinkAvailable: function() {
     var url = this.get("logsLink");
     // If logsLink variable is not present, hardcode its.
-    if (url == undefined) {
+    if (url === undefined) {
       url = "Not Available";
     }
-    return url != "Not Available";
+    return url !== "Not Available";
   }.property("logsLink"),
 
   elapsedTime: function() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
index 4138a87..ea1334f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js
@@ -54,7 +54,7 @@ export default DS.Model.extend({
   applicationExpiryTime: DS.attr('string'),
 
   isFailed: function() {
-    return this.get('finalStatus') == "FAILED"
+    return this.get('finalStatus') == "FAILED";
   }.property("finalStatus"),
 
   validatedFinishedTs: function() {
@@ -91,11 +91,11 @@ export default DS.Model.extend({
     var finalStatus = this.get("finalStatus");
     var style = "";
 
-    if (finalStatus == "KILLED") {
+    if (finalStatus === "KILLED") {
       style = "warning";
-    } else if (finalStatus == "FAILED") {
+    } else if (finalStatus === "FAILED") {
       style = "danger";
-    } else if (finalStatus == "SUCCEEDED") {
+    } else if (finalStatus === "SUCCEEDED") {
       style = "success";
     } else {
       style = "default";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-app.js
index 6dc69ae..bc2447f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-app.js
@@ -27,16 +27,16 @@ export default DS.Model.extend({
    * Indicates no rows were retrieved from backend
    */
   isDummyApp: function() {
-    return this.get('id') == "dummy";
+    return this.get('id') === "dummy";
   }.property("id"),
 
   appStateStyle: function() {
     var style = "default";
     var appState = this.get("state");
-    if (appState == "RUNNING" || appState == "FINISHING_CONTAINERS_WAIT" ||
-        appState == "APPLICATION_RESOURCES_CLEANINGUP") {
+    if (appState === "RUNNING" || appState === "FINISHING_CONTAINERS_WAIT" ||
+        appState === "APPLICATION_RESOURCES_CLEANINGUP") {
       style = "primary";
-    } else if (appState == "FINISHED") {
+    } else if (appState === "FINISHED") {
       style = "success";
     }
     return "label label-" + style;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-container.js
index 3ba3216..40e8447 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-node-container.js
@@ -32,24 +32,24 @@ export default DS.Model.extend({
    * Indicates that there was no container retrieved from backend.
    */
   isDummyContainer: function() {
-    return this.get('id') == "dummy";
+    return this.get('id') === "dummy";
   }.property("id"),
 
   containerStateStyle: function() {
     var style = "primary";
     var containerState = this.get('state');
     var containerExitCode = this.get('exitCode');
-    if (containerState == "DONE") {
-      if (containerExitCode == "0") {
+    if (containerState === "DONE") {
+      if (parseInt(containerExitCode) === 0) {
         style = "success";
-      } else if (containerExitCode != "N/A") {
+      } else if (containerExitCode !== "N/A") {
         style = "danger";
       }
     }
-    if (containerState == "EXITED_WITH_SUCCESS") {
+    if (containerState === "EXITED_WITH_SUCCESS") {
       style = "success";
     }
-    if (containerState == "EXITED_WITH_FAILURE") {
+    if (containerState === "EXITED_WITH_FAILURE") {
       style = "danger";
     }
     return "label label-" + style;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js
index 7de4ccc..27c48f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js
@@ -48,17 +48,17 @@ export default DS.Model.extend({
     return [
       {
         label: "Absolute Capacity",
-        value: this.get("name") == "root" ? 100 : this.get("absCapacity")
+        value: this.get("name") === "root" ? 100 : this.get("absCapacity")
       },
       {
         label: "Absolute Used",
-        value: this.get("name") == "root" ? this.get("usedCapacity") : this.get("absUsedCapacity")
+        value: this.get("name") === "root" ? this.get("usedCapacity") : this.get("absUsedCapacity")
       },
       {
         label: "Absolute Max Capacity",
-        value: this.get("name") == "root" ? 100 : this.get("absMaxCapacity")
+        value: this.get("name") === "root" ? 100 : this.get("absMaxCapacity")
       }
-    ]
+    ];
   }.property("absCapacity", "absUsedCapacity", "absMaxCapacity"),
 
   userUsagesDonutChartData: function() {
@@ -68,7 +68,7 @@ export default DS.Model.extend({
         data.push({
           label: o.get("name"),
           value: o.get("usedMemoryMB")
-        })
+        });
       });
     }
 
@@ -89,6 +89,6 @@ export default DS.Model.extend({
         label: "Active Apps",
         value: this.get("numActiveApplications") || 0
       }
-    ]
-  }.property(),
+    ];
+  }.property()
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
index a15a20f..c7875b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
@@ -47,18 +47,18 @@ export default DS.Model.extend({
    * Indicates no rows were retrieved from backend
    */
   isDummyNode: function() {
-    return this.get('id') == "dummy";
+    return this.get('id') === "dummy";
   }.property("id"),
 
   nodeStateStyle: function() {
     var style = "default";
     var nodeState = this.get("state");
-    if (nodeState == "REBOOTED") {
+    if (nodeState === "REBOOTED") {
       style = "warning";
-    } else if (nodeState == "UNHEALTHY" || nodeState == "DECOMMISSIONED" ||
-          nodeState == "LOST" || nodeState == "SHUTDOWN") {
+    } else if (nodeState === "UNHEALTHY" || nodeState === "DECOMMISSIONED" ||
+          nodeState === "LOST" || nodeState === "SHUTDOWN") {
       style = "danger";
-    } else if (nodeState == "RUNNING") {
+    } else if (nodeState === "RUNNING") {
       style = "success";
     }
     return "label label-" + style;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js
index 7cfd182..84b0fab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-user.js
@@ -23,4 +23,4 @@ export default DS.Model.extend({
   queueName: DS.attr('string'),
   usedMemoryMB: DS.attr('number'),
   usedVCore: DS.attr('number')
-})
\ No newline at end of file
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
index 07b3792..1fd11e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
@@ -29,8 +29,7 @@ export default Ember.Route.extend({
     error: function (error) {
       Ember.Logger.log(error.stack);
 
-      if (error && error.errors[0] &&
-          error.errors[0].status == 404) {
+      if (error && error.errors[0] && parseInt(error.errors[0].status) === 404) {
         this.intermediateTransitionTo('/notfound');
       } else {
         this.intermediateTransitionTo('/error');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js
index 121debf..78ff1c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app-attempts.js
@@ -16,8 +16,6 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
-
 import AbstractRoute from './abstract';
 
 export default AbstractRoute.extend({

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
index 000b02f..86d845c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app.js
@@ -25,7 +25,7 @@ export default AbstractRoute.extend({
     return Ember.RSVP.hash({
       app: this.store.find('yarn-app', param.app_id),
 
-      rmContainers: this.store.find('yarn-app', param.app_id).then(function(app) {
+      rmContainers: this.store.find('yarn-app', param.app_id).then(function() {
         return this.store.query('yarn-app-attempt', {appId: param.app_id}).then(function (attempts) {
           if (attempts && attempts.get('firstObject')) {
             var appAttemptId = attempts.get('firstObject').get('appAttemptId');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-container-log.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-container-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-container-log.js
index 9e4c7d3..8562bf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-container-log.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-container-log.js
@@ -49,7 +49,7 @@ export default AbstractRoute.extend({
   afterModel(model) {
     // Handle errors and redirect if promise is rejected.
     if (model.errors && model.errors[0]) {
-      if (model.errors[0].status == 404) {
+      if (parseInt(model.errors[0].status) === 404) {
         this.replaceWith('/notfound');
       } else {
         this.replaceWith('/error');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
index 4ab5716..436c6d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
@@ -16,6 +16,8 @@
  * limitations under the License.
  */
 
+import Ember from 'ember';
+
 export default Ember.Route.extend({
   beforeModel() {
     this.transitionTo('yarn-queues.root');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
index 3de377a..f8f598b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
@@ -20,13 +20,12 @@ import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
 export default DS.JSONAPISerializer.extend({
-    internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
-      
+    internalNormalizeSingleResponse(store, primaryModelClass, payload) {
+
       if (payload.appAttempt) {
-        payload = payload.appAttempt;  
+        payload = payload.appAttempt;
       }
-      
+
       var fixedPayload = {
         id: payload.appAttemptId,
         type: primaryModelClass.modelName, // yarn-app
@@ -48,15 +47,13 @@ export default DS.JSONAPISerializer.extend({
       return fixedPayload;
     },
 
-    normalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
-      var p = this.internalNormalizeSingleResponse(store, 
-        primaryModelClass, payload, id, requestType);
+    normalizeSingleResponse(store, primaryModelClass, payload/*, id, requestType*/) {
+      var p = this.internalNormalizeSingleResponse(store,
+        primaryModelClass, payload);
       return { data: p };
     },
 
-    normalizeArrayResponse(store, primaryModelClass, payload, id,
-      requestType) {
+    normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
       // return expected is { data: [ {}, {} ] }
       var normalizedArrayResponse = {};
 
@@ -65,11 +62,11 @@ export default DS.JSONAPISerializer.extend({
         // need some error handling for ex apps or app may not be defined.
         normalizedArrayResponse.data = payload.appAttempts.appAttempt.map(singleApp => {
           return this.internalNormalizeSingleResponse(store, primaryModelClass,
-            singleApp, singleApp.id, requestType);
+            singleApp);
         }, this);
       } else {
         normalizedArrayResponse.data = [];
       }
       return normalizedArrayResponse;
     }
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
index fdba04a..7c82ec2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js
@@ -20,8 +20,7 @@ import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
 export default DS.JSONAPISerializer.extend({
-    internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
+    internalNormalizeSingleResponse(store, primaryModelClass, payload, id) {
       if (payload.app) {
         payload = payload.app;
       }
@@ -76,15 +75,13 @@ export default DS.JSONAPISerializer.extend({
       return fixedPayload;
     },
 
-    normalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
+    normalizeSingleResponse(store, primaryModelClass, payload, id/*, requestType*/) {
       var p = this.internalNormalizeSingleResponse(store,
-        primaryModelClass, payload, id, requestType);
+        primaryModelClass, payload, id);
       return { data: p };
     },
 
-    normalizeArrayResponse(store, primaryModelClass, payload, id,
-      requestType) {
+    normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
       // return expected is { data: [ {}, {} ] }
       var normalizedArrayResponse = {};
 
@@ -93,7 +90,7 @@ export default DS.JSONAPISerializer.extend({
       if(payload.apps && payload.apps.app) {
         normalizedArrayResponse.data = payload.apps.app.map(singleApp => {
           return this.internalNormalizeSingleResponse(store, primaryModelClass,
-          singleApp, singleApp.id, requestType);
+            singleApp, singleApp.id);
           }, this);
       } else {
         normalizedArrayResponse.data = [];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container-log.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container-log.js
index 9e10615..2aacf72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container-log.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container-log.js
@@ -20,8 +20,7 @@ import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
 export default DS.JSONAPISerializer.extend({
-  normalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  normalizeSingleResponse(store, primaryModelClass, payload, id/*, requestType*/) {
     // Convert plain text response into JSON.
     // ID is of the form nodeAddress!containerId!fileName
     var splits = Converter.splitForContainerLogs(id);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
index b9b923d..8ccff07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-container.js
@@ -20,9 +20,8 @@ import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
 export default DS.JSONAPISerializer.extend({
-    internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
-      
+    internalNormalizeSingleResponse(store, primaryModelClass, payload) {
+
       var fixedPayload = {
         id: payload.containerId,
         type: primaryModelClass.modelName, // yarn-app
@@ -44,15 +43,13 @@ export default DS.JSONAPISerializer.extend({
       return fixedPayload;
     },
 
-    normalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
-      var p = this.internalNormalizeSingleResponse(store, 
-        primaryModelClass, payload, id, requestType);
+    normalizeSingleResponse(store, primaryModelClass, payload/*, id, requestType*/) {
+      var p = this.internalNormalizeSingleResponse(store,
+        primaryModelClass, payload);
       return { data: p };
     },
 
-    normalizeArrayResponse(store, primaryModelClass, payload, id,
-      requestType) {
+    normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
       // return expected is { data: [ {}, {} ] }
       var normalizedArrayResponse = {};
 
@@ -62,12 +59,11 @@ export default DS.JSONAPISerializer.extend({
           // need some error handling for ex apps or app may not be defined.
           normalizedArrayResponse.data = payload.container.map(singleContainer => {
             return this.internalNormalizeSingleResponse(store, primaryModelClass,
-              singleContainer, singleContainer.id, requestType);
+              singleContainer);
           }, this);
         } else {
           normalizedArrayResponse.data = [this.internalNormalizeSingleResponse(
-            store, primaryModelClass, payload.container, payload.container.id,
-            requestType)];
+            store, primaryModelClass, payload.container)];
         }
         return normalizedArrayResponse;
       } else {
@@ -76,4 +72,4 @@ export default DS.JSONAPISerializer.extend({
 
       return normalizedArrayResponse;
     }
-});
\ No newline at end of file
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-app.js
index 3dfd776..9f6c425 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-app.js
@@ -15,26 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 
 import DS from 'ember-data';
-import Ember from 'ember';
 
 export default DS.JSONAPISerializer.extend({
   internalNormalizeSingleResponse(store, primaryModelClass, payload) {
@@ -55,16 +37,14 @@ export default DS.JSONAPISerializer.extend({
     return fixedPayload;
   },
 
-  normalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  normalizeSingleResponse(store, primaryModelClass, payload/*, id, requestType*/) {
     // payload is of the form {"app":{}}
     var p = this.internalNormalizeSingleResponse(store,
         primaryModelClass, payload);
     return { data: p };
   },
 
-  normalizeArrayResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
     // expected return response is of the form { data: [ {}, {} ] }
     var normalizedArrayResponse = {};
     // payload is of the form { "apps" : { "app": [ {},{},{} ]  } }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
index bf19ad7..7e78987 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
@@ -17,7 +17,6 @@
  */
 
 import DS from 'ember-data';
-import Ember from 'ember';
 
 export default DS.JSONAPISerializer.extend({
   internalNormalizeSingleResponse(store, primaryModelClass, payload) {
@@ -42,16 +41,14 @@ export default DS.JSONAPISerializer.extend({
     return fixedPayload;
   },
 
-  normalizeSingleResponse(store, primaryModelClass, payload, id,
-    requestType) {
+  normalizeSingleResponse(store, primaryModelClass, payload/*, id, requestType*/) {
     // payload is of the form {"container":{}}
     var p = this.internalNormalizeSingleResponse(store,
         primaryModelClass, payload);
     return { data: p };
   },
 
-  normalizeArrayResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
     // expected return response is of the form { data: [ {}, {} ] }
     var normalizedArrayResponse = {};
     if (payload.containers && payload.containers.container) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
index 19308e2..0d9faec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
@@ -20,8 +20,7 @@ import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
 export default DS.JSONAPISerializer.extend({
-  internalNormalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  internalNormalizeSingleResponse(store, primaryModelClass, payload, id) {
     if (payload.nodeInfo) {
       payload = payload.nodeInfo;
     }
@@ -46,11 +45,10 @@ export default DS.JSONAPISerializer.extend({
     return fixedPayload;
   },
 
-  normalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  normalizeSingleResponse(store, primaryModelClass, payload, id/*, requestType*/) {
     // payload is of the form {"nodeInfo":{}}
     var p = this.internalNormalizeSingleResponse(store,
-        primaryModelClass, payload, id, requestType);
+        primaryModelClass, payload, id);
     return { data: p };
   },
 });




[08/23] hadoop git commit: HADOOP-13895. Make FileStatus Serializable

Posted by jh...@apache.org.
HADOOP-13895. Make FileStatus Serializable


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59c5f187
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59c5f187
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59c5f187

Branch: refs/heads/YARN-5734
Commit: 59c5f18784121f04030d1d0982f2e2285688ee11
Parents: 3619ae3
Author: Chris Douglas <cd...@apache.org>
Authored: Wed Feb 1 10:19:36 2017 -0800
Committer: Chris Douglas <cd...@apache.org>
Committed: Wed Feb 1 10:19:36 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/BlockLocation.java     |  7 +++++--
 .../java/org/apache/hadoop/fs/FileStatus.java   | 21 +++++++++++++++++--
 .../hadoop/fs/permission/FsCreateModes.java     |  1 +
 .../hadoop/fs/permission/FsPermission.java      | 22 +++++++++++++++++---
 .../org/apache/hadoop/fs/TestFileStatus.java    | 19 +++++++++++++++++
 .../org/apache/hadoop/fs/HdfsBlockLocation.java | 17 +++++++++++++--
 .../hdfs/protocol/FsPermissionExtension.java    |  2 ++
 7 files changed, 80 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
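
In practical terms, this change lets a FileStatus round-trip through plain Java object serialization; the new TestFileStatus#testSerializable below exercises exactly that path. A minimal standalone sketch of the same round trip, using made-up length/owner/path values purely for illustration (only the constructor and API shapes come from the patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class FileStatusRoundTrip {
      public static void main(String[] args) throws Exception {
        // Hypothetical values; only the call shapes matter here.
        FileStatus stat = new FileStatus(1024L, false, 3, 128L << 20, 0L, 0L,
            FsPermission.getFileDefault(), "owner", "group",
            new Path("hdfs://example:8020/tmp/data"));

        // Serialize to a byte array.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
          oos.writeObject(stat);
        }

        // Deserialize and sanity-check the result.
        try (ObjectInputStream ois =
            new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
          FileStatus copy = (FileStatus) ois.readObject();
          copy.validateObject();                  // throws InvalidObjectException if path/type were lost
          System.out.println(copy.equals(stat));  // expected: true
        }
      }
    }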


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59c5f187/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index 7811ef5..b8cad3a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.IOException;
+import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -29,7 +30,9 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class BlockLocation {
+public class BlockLocation implements Serializable {
+  private static final long serialVersionUID = 0x22986f6d;
+
   private String[] hosts; // Datanode hostnames
   private String[] cachedHosts; // Datanode hostnames with a cached replica
   private String[] names; // Datanode IP:xferPort for accessing the block
@@ -303,4 +306,4 @@ public class BlockLocation {
     }
     return result.toString();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59c5f187/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 6a79768..72ca24f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.fs;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -31,11 +34,14 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable<FileStatus> {
+public class FileStatus implements Writable, Comparable<FileStatus>,
+    Serializable, ObjectInputValidation {
+
+  private static final long serialVersionUID = 0x13caeae8;
 
   private Path path;
   private long length;
-  private boolean isdir;
+  private Boolean isdir;
   private short block_replication;
   private long blocksize;
   private long modification_time;
@@ -387,4 +393,15 @@ public class FileStatus implements Writable, Comparable<FileStatus> {
     sb.append("}");
     return sb.toString();
   }
+
+  @Override
+  public void validateObject() throws InvalidObjectException {
+    if (null == path) {
+      throw new InvalidObjectException("No Path in deserialized FileStatus");
+    }
+    if (null == isdir) {
+      throw new InvalidObjectException("No type in deserialized FileStatus");
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59c5f187/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java
index a1ed0d7..2bd6f1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public final class FsCreateModes extends FsPermission {
+  private static final long serialVersionUID = 0x22986f6d;
   private final FsPermission unmasked;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59c5f187/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index fabfc12..56e19dc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.fs.permission;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,8 +39,10 @@ import org.apache.hadoop.io.WritableFactory;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FsPermission implements Writable {
+public class FsPermission implements Writable, Serializable,
+    ObjectInputValidation {
   private static final Log LOG = LogFactory.getLog(FsPermission.class);
+  private static final long serialVersionUID = 0x2fe08564;
 
   static final WritableFactory FACTORY = new WritableFactory() {
     @Override
@@ -60,7 +65,7 @@ public class FsPermission implements Writable {
   private FsAction useraction = null;
   private FsAction groupaction = null;
   private FsAction otheraction = null;
-  private boolean stickyBit = false;
+  private Boolean stickyBit = false;
 
   private FsPermission() {}
 
@@ -202,7 +207,7 @@ public class FsPermission implements Writable {
       return this.useraction == that.useraction
           && this.groupaction == that.groupaction
           && this.otheraction == that.otheraction
-          && this.stickyBit == that.stickyBit;
+          && this.stickyBit.booleanValue() == that.stickyBit.booleanValue();
     }
     return false;
   }
@@ -377,6 +382,7 @@ public class FsPermission implements Writable {
   }
   
   private static class ImmutableFsPermission extends FsPermission {
+    private static final long serialVersionUID = 0x1bab54bd;
     public ImmutableFsPermission(short permission) {
       super(permission);
     }
@@ -386,4 +392,14 @@ public class FsPermission implements Writable {
       throw new UnsupportedOperationException();
     }
   }
+
+  @Override
+  public void validateObject() throws InvalidObjectException {
+    if (null == useraction || null == groupaction || null == otheraction) {
+      throw new InvalidObjectException("Invalid mode in FsPermission");
+    }
+    if (null == stickyBit) {
+      throw new InvalidObjectException("No sticky bit in FsPermission");
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59c5f187/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
index dd5279d..35f2bad 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
@@ -26,6 +26,8 @@ import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -216,6 +218,23 @@ public class TestFileStatus {
         MTIME, ATIME, PERMISSION, OWNER, GROUP, symlink, PATH);  
     validateToString(fileStatus);
   }
+
+  @Test
+  public void testSerializable() throws Exception {
+    Path p = new Path("uqsf://ybpnyubfg:8020/sbb/one/onm");
+    FsPermission perm = FsPermission.getFileDefault();
+    FileStatus stat = new FileStatus(4344L, false, 4, 512L << 20, 12345678L,
+        87654321L, perm, "yak", "dingo", p);
+    ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
+    try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+      oos.writeObject(stat);
+    }
+    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+    try (ObjectInputStream ois = new ObjectInputStream(bais)) {
+      FileStatus deser = (FileStatus) ois.readObject();
+      assertEquals(stat, deser);
+    }
+  }
   
   /**
    * Validate the accessors for FileStatus.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59c5f187/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
index eac3f96..2ee7f41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/HdfsBlockLocation.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -28,9 +32,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class HdfsBlockLocation extends BlockLocation {
+public class HdfsBlockLocation extends BlockLocation implements Serializable {
+  private static final long serialVersionUID = 0x7aecec92;
 
-  private final LocatedBlock block;
+  private transient LocatedBlock block;
 
   public HdfsBlockLocation(BlockLocation loc, LocatedBlock block) {
     // Initialize with data from passed in BlockLocation
@@ -41,4 +46,12 @@ public class HdfsBlockLocation extends BlockLocation {
   public LocatedBlock getLocatedBlock() {
     return block;
   }
+
+  private void readObject(ObjectInputStream ois)
+      throws IOException, ClassNotFoundException {
+    ois.defaultReadObject();
+    // LocatedBlock is not Serializable
+    block = null;
+  }
+
 }
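
The effect of marking the field transient is that a deserialized copy carries no LocatedBlock; the custom readObject above just makes that explicit. A small sketch of the same pattern, using made-up class names rather than the HDFS types:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;

    // Stand-in for a non-serializable type such as LocatedBlock.
    class Session { }

    // Illustrative wrapper, not the HDFS class.
    class Wrapper implements Serializable {
      private static final long serialVersionUID = 1L;
      // transient: skipped during serialization, null after deserialization
      private transient Session session = new Session();

      public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
          out.writeObject(new Wrapper());
        }
        try (ObjectInputStream in = new ObjectInputStream(
            new ByteArrayInputStream(bytes.toByteArray()))) {
          Wrapper copy = (Wrapper) in.readObject();
          System.out.println("transient member after round trip: "
              + copy.session);  // prints null
        }
      }
    }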

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59c5f187/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
index f74472d..786bb58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
  */
 @InterfaceAudience.Private
 public class FsPermissionExtension extends FsPermission {
+  private static final long serialVersionUID = 0x13c298a4;
+
   private final static short ACL_BIT = 1 << 12;
   private final static short ENCRYPTED_BIT = 1 << 13;
   private final boolean aclBit;




[06/23] hadoop git commit: HDFS-11335. Remove HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY usage from DNConf. (Manoj Govindassamy via lei)

Posted by jh...@apache.org.
HDFS-11335. Remove HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY usage from DNConf. (Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bec9b7aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bec9b7aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bec9b7aa

Branch: refs/heads/YARN-5734
Commit: bec9b7aa1dd3ed95b8783597135f8d90b3cc8dcd
Parents: 3e06475
Author: Lei Xu <le...@apache.org>
Authored: Wed Feb 1 14:42:51 2017 +0800
Committer: Lei Xu <le...@apache.org>
Committed: Wed Feb 1 14:42:51 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java   | 15 ---------------
 .../hadoop/hdfs/server/datanode/BlockReceiver.java   |  3 ++-
 .../apache/hadoop/hdfs/server/datanode/DNConf.java   | 12 +-----------
 3 files changed, 3 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bec9b7aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3cc4b5f..41c5aaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1294,14 +1294,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE
       = HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT;
 
-
-  @Deprecated
-  public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY =
-      HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-  @Deprecated
-  public static final int     DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT =
-      HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-
   @Deprecated
   public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY =
       HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
@@ -1395,13 +1387,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT;
 
   @Deprecated
-  public static final String  DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
-      HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY;
-
-  @Deprecated
-  public static final long    DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT =
-      HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
-  @Deprecated
   public static final String  DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
       HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS;
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bec9b7aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index dd4b58b..5852403a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -163,7 +163,8 @@ class BlockReceiver implements Closeable {
       this.isDatanode = clientname.length() == 0;
       this.isClient = !this.isDatanode;
       this.restartBudget = datanode.getDnConf().restartReplicaExpiry;
-      this.datanodeSlowLogThresholdMs = datanode.getDnConf().datanodeSlowIoWarningThresholdMs;
+      this.datanodeSlowLogThresholdMs =
+          datanode.getDnConf().getSlowIoWarningThresholdMs();
       // For replaceBlock() calls response should be sent to avoid socketTimeout
       // at clients. So sending with the interval of 0.5 * socketTimeout
       final long readTimeout = datanode.getDnConf().socketTimeout;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bec9b7aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index e2c5fbc..2723677 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -33,8 +33,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_P
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
@@ -101,10 +99,8 @@ public class DNConf {
   final long ibrInterval;
   final long initialBlockReportDelayMs;
   final long cacheReportInterval;
-  final long dfsclientSlowIoWarningThresholdMs;
   final long datanodeSlowIoWarningThresholdMs;
-  final int writePacketSize;
-  
+
   final String minimumNameNodeVersion;
   final String encryptionAlgorithm;
   final SaslPropertiesResolver saslPropsResolver;
@@ -151,9 +147,6 @@ public class DNConf {
         DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
         DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);
 
-    writePacketSize = getConf().getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
-        DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-    
     readaheadLength = getConf().getLong(
         HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
         HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
@@ -194,9 +187,6 @@ public class DNConf {
         DFS_CACHEREPORT_INTERVAL_MSEC_KEY,
         DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT);
 
-    this.dfsclientSlowIoWarningThresholdMs = getConf().getLong(
-        HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
-        HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
     this.datanodeSlowIoWarningThresholdMs = getConf().getLong(
         DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
         DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
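
For context, the datanode-side threshold that BlockReceiver now reads through getSlowIoWarningThresholdMs() is resolved from configuration roughly as in this sketch (assuming a plain Configuration and only the two DFSConfigKeys constants shown above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    // Sketch only: resolve the datanode slow-IO warning threshold from
    // configuration, as DNConf does for getSlowIoWarningThresholdMs().
    public class SlowIoThresholdExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        long thresholdMs = conf.getLong(
            DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
            DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
        System.out.println("datanode slow-IO warning threshold: "
            + thresholdMs + " ms");
      }
    }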




[15/23] hadoop git commit: HDFS-11387. Socket reuse address option is not honored in PrivilegedNfsGatewayStarter. Contributed by Mukul Kumar Singh.

Posted by jh...@apache.org.
HDFS-11387. Socket reuse address option is not honored in PrivilegedNfsGatewayStarter. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0235842
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0235842
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0235842

Branch: refs/heads/YARN-5734
Commit: e0235842a74f1a1a62051ebb8c9dbd47324fcc25
Parents: 0914fcc
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Feb 3 12:28:05 2017 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Feb 3 12:28:05 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java      | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0235842/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
index 695cbc3..9456caf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
@@ -54,9 +54,11 @@ public class PrivilegedNfsGatewayStarter implements Daemon {
     }
 
     try {
-      registrationSocket = new DatagramSocket(
-                    new InetSocketAddress("localhost", clientPort));
+      InetSocketAddress socketAddress =
+                new InetSocketAddress("localhost", clientPort);
+      registrationSocket = new DatagramSocket(null);
       registrationSocket.setReuseAddress(true);
+      registrationSocket.bind(socketAddress);
     } catch (SocketException e) {
       LOG.error("Init failed for port=" + clientPort, e);
       throw e;
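
The ordering matters because SO_REUSEADDR must be set before the socket is bound; passing the address to the DatagramSocket constructor binds immediately, so setting the option afterwards has no effect. A minimal sketch of the corrected sequence (the port number here is only an example):

    import java.net.DatagramSocket;
    import java.net.InetSocketAddress;
    import java.net.SocketException;

    // Sketch of the corrected sequence: create unbound, set reuse, then bind.
    public class ReuseAddressBind {
      public static void main(String[] args) throws SocketException {
        InetSocketAddress addr = new InetSocketAddress("localhost", 2049);
        DatagramSocket socket = new DatagramSocket(null); // unbound socket
        socket.setReuseAddress(true);                     // set before bind
        socket.bind(addr);                                // now bind
        System.out.println("reuse honored: " + socket.getReuseAddress());
        socket.close();
      }
    }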




[05/23] hadoop git commit: YARN-4658. Typo in o.a.h.yarn.server.resourcemanager.scheduler.fair.TestFairScheduler comment (Contributed by Udai Potluri via Daniel Templeton)

Posted by jh...@apache.org.
YARN-4658. Typo in o.a.h.yarn.server.resourcemanager.scheduler.fair.TestFairScheduler comment (Contributed by Udai Potluri via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e064753
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e064753
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e064753

Branch: refs/heads/YARN-5734
Commit: 3e06475307e30377092547dfdebe6c2c6ac6e78f
Parents: 258991d
Author: Daniel Templeton <te...@apache.org>
Authored: Tue Jan 31 16:25:33 2017 -0800
Committer: Daniel Templeton <te...@apache.org>
Committed: Tue Jan 31 16:25:33 2017 -0800

----------------------------------------------------------------------
 .../resourcemanager/scheduler/fair/TestContinuousScheduling.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e064753/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 8bb06e7..1ea0032 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
@@ -261,7 +261,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase {
     FairScheduler spyScheduler = spy(scheduler);
     Assert.assertTrue("Continuous scheduling should be disabled.",
         !spyScheduler.isContinuousSchedulingEnabled());
-    // Add one nodes
+    // Add one node
     RMNode node1 =
         MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1,
             "127.0.0.1");




[10/23] hadoop git commit: HDFS-11112. Journal Nodes should refuse to format non-empty directories. Contributed by Yiqun Lin.

Posted by jh...@apache.org.
HDFS-11112. Journal Nodes should refuse to format non-empty directories. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6aa09dc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6aa09dc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6aa09dc2

Branch: refs/heads/YARN-5734
Commit: 6aa09dc28adc3e7c81258568ac08996eee0356da
Parents: b6f290d
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Feb 1 16:51:58 2017 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Feb 1 16:51:58 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/qjournal/server/JNStorage.java     |  7 +++++++
 .../apache/hadoop/hdfs/server/common/Storage.java  |  2 +-
 .../hadoop/hdfs/qjournal/server/TestJournal.java   | 17 +++++++++++++++++
 3 files changed, 25 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6aa09dc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index 77171a1..07c9286 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -180,7 +180,14 @@ class JNStorage extends Storage {
   }
 
   void format(NamespaceInfo nsInfo) throws IOException {
+    unlockAll();
+    try {
+      sd.analyzeStorage(StartupOption.FORMAT, this, true);
+    } finally {
+      sd.unlock();
+    }
     setStorageInfo(nsInfo);
+
     LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
     // Unlock the directory before formatting, because we will
     // re-analyze it after format(). The analyzeStorage() call

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6aa09dc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 519c28f..1af7877 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -550,7 +550,7 @@ public abstract class Storage extends StorageInfo {
           Files.newDirectoryStream(currentDir.toPath())) {
         if (dirStream.iterator().hasNext()) {
           throw new InconsistentFSStateException(root,
-              "Can't format the storage directory because the current/ "
+              "Can't format the storage directory because the current "
                   + "directory is not empty.");
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6aa09dc2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
index 5cdc1a3..4c36bcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
@@ -204,6 +204,9 @@ public class TestJournal {
     
     // Close the journal in preparation for reformatting it.
     journal.close();
+    // Clear the storage directory before reformatting it
+    journal.getStorage().getJournalManager()
+        .getStorageDirectory().clearDirectory();
     journal.format(FAKE_NSINFO_2);
     
     assertEquals(0, journal.getLastPromisedEpoch());
@@ -417,4 +420,18 @@ public class TestJournal {
     }
   }
 
+  @Test
+  public void testFormatNonEmptyStorageDirectories() throws Exception {
+    try {
+      // Format again here and to format the non-empty directories in
+      // journal node.
+      journal.format(FAKE_NSINFO);
+      fail("Did not fail to format non-empty directories in journal node.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "Can't format the storage directory because the current "
+              + "directory is not empty.", ioe);
+    }
+  }
+
 }
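
The emptiness check this test exercises boils down to opening a DirectoryStream on the current/ directory and refusing to format if it yields any entry. A rough standalone sketch of that check, with an example path rather than a real journal directory:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Sketch of the emptiness check; the path below is only an example.
    public class RefuseNonEmptyFormat {
      static void checkEmptyBeforeFormat(Path current) throws IOException {
        try (DirectoryStream<Path> entries = Files.newDirectoryStream(current)) {
          if (entries.iterator().hasNext()) {
            throw new IOException("Can't format the storage directory because"
                + " the current directory is not empty: " + current);
          }
        }
      }

      public static void main(String[] args) throws IOException {
        checkEmptyBeforeFormat(Paths.get("/tmp/jn/current"));
        System.out.println("empty, safe to format");
      }
    }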




[13/23] hadoop git commit: HDFS-11353. Improve the unit tests relevant to DataNode volume failure testing. Contributed by Yiqun Lin.

Posted by jh...@apache.org.
HDFS-11353. Improve the unit tests relevant to DataNode volume failure testing. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3433f572
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3433f572
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3433f572

Branch: refs/heads/YARN-5734
Commit: 3433f572fafb9a7d7608915f3471fa6f025fa75c
Parents: 327c998
Author: Yiqun Lin <yq...@apache.org>
Authored: Thu Feb 2 19:38:17 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Thu Feb 2 19:38:17 2017 +0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/DataNodeTestUtils.java | 26 ++++++++
 .../datanode/TestDataNodeHotSwapVolumes.java    |  9 +--
 .../datanode/TestDataNodeVolumeFailure.java     | 65 +++++++++-----------
 .../TestDataNodeVolumeFailureReporting.java     | 12 +++-
 .../TestDataNodeVolumeFailureToleration.java    |  6 ++
 5 files changed, 72 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3433f572/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index cf5b724..6d5ab71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -36,10 +36,13 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.base.Supplier;
+
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 import static org.mockito.Matchers.any;
@@ -257,4 +260,27 @@ public class DataNodeTestUtils {
     }
     return null;
   }
+
+  /**
+   * Call and wait DataNode to detect disk failure.
+   *
+   * @param dn
+   * @param volume
+   * @throws Exception
+   */
+  public static void waitForDiskError(DataNode dn, FsVolumeSpi volume)
+      throws Exception {
+    LOG.info("Starting to wait for datanode to detect disk failure.");
+    final long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
+    dn.checkDiskErrorAsync(volume);
+    // Wait 10 seconds for checkDiskError thread to finish and discover volume
+    // failures.
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+
+      @Override
+      public Boolean get() {
+        return dn.getLastDiskErrorCheck() != lastDiskErrorCheck;
+      }
+    }, 100, 10000);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3433f572/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 80ca0ff..5aec174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -906,8 +906,7 @@ public class TestDataNodeHotSwapVolumes {
    */
   @Test(timeout=60000)
   public void testDirectlyReloadAfterCheckDiskError()
-      throws IOException, TimeoutException, InterruptedException,
-      ReconfigurationException {
+      throws Exception {
     // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
@@ -926,11 +925,7 @@ public class TestDataNodeHotSwapVolumes {
 
     DataNodeTestUtils.injectDataDirFailure(dirToFail);
     // Call and wait DataNode to detect disk failure.
-    long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
-    dn.checkDiskErrorAsync(failedVolume);
-    while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
-      Thread.sleep(100);
-    }
+    DataNodeTestUtils.waitForDiskError(dn, failedVolume);
 
     createFile(new Path("/test1"), 32, (short)2);
     assertEquals(used, failedVolume.getDfsUsed());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3433f572/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 100da02..e73337b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -34,16 +34,15 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.filefilter.TrueFileFilter;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
-import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
 import org.apache.hadoop.hdfs.ClientContext;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -51,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
+import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -73,19 +73,16 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.TrueFileFilter;
-
-import com.google.common.base.Supplier;
-
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
-
+import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Supplier;
+
 /**
  * Fine-grain testing of block files and locations after volume failure.
  */
@@ -111,6 +108,10 @@ public class TestDataNodeVolumeFailure {
   // block id to BlockLocs
   final Map<String, BlockLocs> block_map = new HashMap<String, BlockLocs> ();
 
+  // specific the timeout for entire test class
+  @Rule
+  public Timeout timeout = new Timeout(120 * 1000);
+
   @Before
   public void setUp() throws Exception {
     // bring up a cluster of 2
@@ -225,7 +226,7 @@ public class TestDataNodeVolumeFailure {
    */
   @Test(timeout=150000)
     public void testFailedVolumeBeingRemovedFromDataNode()
-      throws InterruptedException, IOException, TimeoutException {
+      throws Exception {
     // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
@@ -237,7 +238,8 @@ public class TestDataNodeVolumeFailure {
     File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
-    checkDiskErrorSync(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol1));
+    DataNodeTestUtils.waitForDiskError(dn0,
+        DataNodeTestUtils.getVolume(dn0, dn0Vol1));
 
     // Verify dn0Vol1 has been completely removed from DN0.
     // 1. dn0Vol1 is removed from DataStorage.
@@ -284,35 +286,22 @@ public class TestDataNodeVolumeFailure {
     assertFalse(dataDirStrs[0].contains(dn0Vol1.getAbsolutePath()));
   }
 
-  private static void checkDiskErrorSync(DataNode dn, FsVolumeSpi volume)
-      throws InterruptedException {
-    final long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
-    dn.checkDiskErrorAsync(volume);
-    // Wait 10 seconds for checkDiskError thread to finish and discover volume
-    // failures.
-    int count = 100;
-    while (count > 0 && dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
-      Thread.sleep(100);
-      count--;
-    }
-    assertTrue("Disk checking thread does not finish in 10 seconds",
-        count > 0);
-  }
-
   /**
    * Test DataNode stops when the number of failed volumes exceeds
    * dfs.datanode.failed.volumes.tolerated .
    */
   @Test(timeout=10000)
   public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
-      throws InterruptedException, IOException {
+      throws Exception {
     // make both data directories to fail on dn0
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
     DataNode dn0 = cluster.getDataNodes().get(0);
-    checkDiskErrorSync(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol1));
-    checkDiskErrorSync(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol2));
+    DataNodeTestUtils.waitForDiskError(dn0,
+        DataNodeTestUtils.getVolume(dn0, dn0Vol1));
+    DataNodeTestUtils.waitForDiskError(dn0,
+        DataNodeTestUtils.getVolume(dn0, dn0Vol2));
 
     // DN0 should stop after the number of failure disks exceed tolerated
     // value (1).
@@ -324,7 +313,7 @@ public class TestDataNodeVolumeFailure {
    */
   @Test
   public void testVolumeFailureRecoveredByHotSwappingVolume()
-      throws InterruptedException, ReconfigurationException, IOException {
+      throws Exception {
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
     final DataNode dn0 = cluster.getDataNodes().get(0);
@@ -333,7 +322,8 @@ public class TestDataNodeVolumeFailure {
 
     // Fail dn0Vol1 first.
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
-    checkDiskErrorSync(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol1));
+    DataNodeTestUtils.waitForDiskError(dn0,
+        DataNodeTestUtils.getVolume(dn0, dn0Vol1));
 
     // Hot swap out the failure volume.
     String dataDirs = dn0Vol2.getPath();
@@ -352,7 +342,8 @@ public class TestDataNodeVolumeFailure {
     // Fail dn0Vol2. Now since dn0Vol1 has been fixed, DN0 has sufficient
     // resources, thus it should keep running.
     DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
-    checkDiskErrorSync(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol2));
+    DataNodeTestUtils.waitForDiskError(dn0,
+        DataNodeTestUtils.getVolume(dn0, dn0Vol2));
     assertTrue(dn0.shouldRun());
   }
 
@@ -362,7 +353,7 @@ public class TestDataNodeVolumeFailure {
    */
   @Test
   public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
-      throws InterruptedException, ReconfigurationException, IOException {
+      throws Exception {
     final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
     final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
     final File dn0VolNew = new File(dataDir, "data_new");
@@ -379,12 +370,14 @@ public class TestDataNodeVolumeFailure {
 
     // Fail dn0Vol1 first and hot swap it.
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
-    checkDiskErrorSync(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol1));
+    DataNodeTestUtils.waitForDiskError(dn0,
+        DataNodeTestUtils.getVolume(dn0, dn0Vol1));
     assertTrue(dn0.shouldRun());
 
     // Fail dn0Vol2, now dn0 should stop, because we only tolerate 1 disk failure.
     DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
-    checkDiskErrorSync(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol2));
+    DataNodeTestUtils.waitForDiskError(dn0,
+        DataNodeTestUtils.getVolume(dn0, dn0Vol2));
     assertFalse(dn0.shouldRun());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3433f572/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index 3015e61..fbbc7f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -52,7 +52,9 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 /**
  * Test reporting of DN volume failure counts and metrics.
@@ -80,6 +82,10 @@ public class TestDataNodeVolumeFailureReporting {
   // a datanode to be considered dead by the namenode.  
   final int WAIT_FOR_DEATH = 15000;
 
+  // specific the timeout for entire test class
+  @Rule
+  public Timeout timeout = new Timeout(120 * 1000);
+
   @Before
   public void setUp() throws Exception {
     // These tests use DataNodeTestUtils#injectDataDirFailure() to simulate
@@ -204,13 +210,13 @@ public class TestDataNodeVolumeFailureReporting {
     DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file3, (short)2);
 
-    // The DN should consider itself dead
-    DFSTestUtil.waitForDatanodeDeath(dns.get(2));
-
     // And report two failed volumes
     checkFailuresAtDataNode(dns.get(2), 2, true, dn3Vol1.getAbsolutePath(),
         dn3Vol2.getAbsolutePath());
 
+    // The DN should consider itself dead
+    DFSTestUtil.waitForDatanodeDeath(dns.get(2));
+
     // The NN considers the DN dead
     DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 2, 
         origCapacity - (4*dnCapacity), WAIT_FOR_HEARTBEATS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3433f572/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index de50ccb..46f9bf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -39,7 +39,9 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 /**
  * Test the ability of a DN to tolerate volume failures.
@@ -58,6 +60,10 @@ public class TestDataNodeVolumeFailureToleration {
   // a datanode to be considered dead by the namenode.  
   final int WAIT_FOR_DEATH = 15000;
 
+  // specific the timeout for entire test class
+  @Rule
+  public Timeout timeout = new Timeout(120 * 1000);
+
   @Before
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
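
GenericTestUtils.waitFor() replaces the hand-rolled sleep loops with a poll-until-true helper bounded by a timeout. A simplified standalone version of that idea (not the Hadoop implementation) looks like:

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    // Simplified poll-until-true helper; not the Hadoop implementation.
    public class WaitForExample {
      static void waitFor(BooleanSupplier check, long intervalMs, long timeoutMs)
          throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }

      public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        // Example condition: becomes true after roughly half a second.
        waitFor(() -> System.currentTimeMillis() - start > 500, 100, 10000);
        System.out.println("condition satisfied");
      }
    }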




[07/23] hadoop git commit: MAPREDUCE-6644. Use doxia macro to generate in-page TOC of MapReduce site documentation. (iwasakims)

Posted by jh...@apache.org.
MAPREDUCE-6644. Use doxia macro to generate in-page TOC of MapReduce site documentation. (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3619ae32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3619ae32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3619ae32

Branch: refs/heads/YARN-5734
Commit: 3619ae32bea227f17ecc9ef964e9194bc09fac0b
Parents: bec9b7a
Author: Masatake Iwasaki <iw...@apache.org>
Authored: Thu Feb 2 01:19:36 2017 +0900
Committer: Masatake Iwasaki <iw...@apache.org>
Committed: Thu Feb 2 01:19:36 2017 +0900

----------------------------------------------------------------------
 .../site/markdown/DistributedCacheDeploy.md.vm  |  2 +
 .../src/site/markdown/EncryptedShuffle.md       |  2 +
 .../src/site/markdown/MapReduceTutorial.md      | 45 +-------------------
 .../MapReduce_Compatibility_Hadoop1_Hadoop2.md  |  2 +
 .../src/site/markdown/MapredAppMasterRest.md    | 16 +------
 .../src/site/markdown/MapredCommands.md         | 14 +-----
 .../PluggableShuffleAndPluggableSort.md         |  2 +
 .../src/site/markdown/HistoryServerRest.md      | 22 +---------
 8 files changed, 12 insertions(+), 93 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
index 36ad8fc..c69be1c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
@@ -19,6 +19,8 @@
 Hadoop: Distributed Cache Deploy
 ================================
 
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
 Introduction
 ------------
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md
index 6aa4669..1b109a3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/EncryptedShuffle.md
@@ -15,6 +15,8 @@
 Hadoop: Encrypted Shuffle
 =========================
 
+<!-- MACRO{toc|fromDepth=0|toDepth=2} -->
+
 Introduction
 ------------
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
index 8dee03c..6747adc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -15,50 +15,7 @@
 MapReduce Tutorial
 ==================
 
-* [MapReduce Tutorial](#MapReduce_Tutorial)
-    * [Purpose](#Purpose)
-    * [Prerequisites](#Prerequisites)
-    * [Overview](#Overview)
-    * [Inputs and Outputs](#Inputs_and_Outputs)
-    * [Example: WordCount v1.0](#Example:_WordCount_v1.0)
-        * [Source Code](#Source_Code)
-        * [Usage](#Usage)
-        * [Walk-through](#Walk-through)
-    * [MapReduce - User Interfaces](#MapReduce_-_User_Interfaces)
-        * [Payload](#Payload)
-            * [Mapper](#Mapper)
-            * [Reducer](#Reducer)
-            * [Partitioner](#Partitioner)
-            * [Counter](#Counter)
-        * [Job Configuration](#Job_Configuration)
-        * [Task Execution & Environment](#Task_Execution__Environment)
-            * [Memory Management](#Memory_Management)
-            * [Map Parameters](#Map_Parameters)
-            * [Shuffle/Reduce Parameters](#ShuffleReduce_Parameters)
-            * [Configured Parameters](#Configured_Parameters)
-            * [Task Logs](#Task_Logs)
-            * [Distributing Libraries](#Distributing_Libraries)
-        * [Job Submission and Monitoring](#Job_Submission_and_Monitoring)
-            * [Job Control](#Job_Control)
-        * [Job Input](#Job_Input)
-            * [InputSplit](#InputSplit)
-            * [RecordReader](#RecordReader)
-        * [Job Output](#Job_Output)
-            * [OutputCommitter](#OutputCommitter)
-            * [Task Side-Effect Files](#Task_Side-Effect_Files)
-            * [RecordWriter](#RecordWriter)
-        * [Other Useful Features](#Other_Useful_Features)
-            * [Submitting Jobs to Queues](#Submitting_Jobs_to_Queues)
-            * [Counters](#Counters)
-            * [DistributedCache](#DistributedCache)
-            * [Profiling](#Profiling)
-            * [Debugging](#Debugging)
-            * [Data Compression](#Data_Compression)
-            * [Skipping Bad Records](#Skipping_Bad_Records)
-        * [Example: WordCount v2.0](#Example:_WordCount_v2.0)
-            * [Source Code](#Source_Code)
-            * [Sample Runs](#Sample_Runs)
-            * [Highlights](#Highlights)
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
 
 Purpose
 -------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduce_Compatibility_Hadoop1_Hadoop2.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduce_Compatibility_Hadoop1_Hadoop2.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduce_Compatibility_Hadoop1_Hadoop2.md
index 11caab7..fc66a16 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduce_Compatibility_Hadoop1_Hadoop2.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduce_Compatibility_Hadoop1_Hadoop2.md
@@ -15,6 +15,8 @@
 Apache Hadoop MapReduce - Migrating from Apache Hadoop 1.x to Apache Hadoop 2.x
 ===============================================================================
 
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
 Introduction
 ------------
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
index e128eb1..7f8c54f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredAppMasterRest.md
@@ -15,21 +15,7 @@
 MapReduce Application Master REST API's.
 ========================================
 
-* [MapReduce Application Master REST API's.](#MapReduce_Application_Master_REST_APIs.)
-    * [Overview](#Overview)
-    * [Mapreduce Application Master Info API](#Mapreduce_Application_Master_Info_API)
-    * [Jobs API](#Jobs_API)
-    * [Job API](#Job_API)
-    * [Job Attempts API](#Job_Attempts_API)
-    * [Job Counters API](#Job_Counters_API)
-    * [Job Conf API](#Job_Conf_API)
-    * [Tasks API](#Tasks_API)
-    * [Task API](#Task_API)
-    * [Task Counters API](#Task_Counters_API)
-    * [Task Attempts API](#Task_Attempts_API)
-    * [Task Attempt API](#Task_Attempt_API)
-    * [Task Attempt State API](#Task_Attempt_State_API)
-    * [Task Attempt Counters API](#Task_Attempt_Counters_API)
+<!-- MACRO{toc|fromDepth=0|toDepth=1} -->
 
 Overview
 --------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
index 6b7de2b..df07024 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
@@ -15,19 +15,7 @@
 MapReduce Commands Guide
 ========================
 
-* [Overview](#Overview)
-* [User Commands](#User_Commands)
-    * [archive](#archive)
-    * [archive-logs](#archive-logs)
-    * [classpath](#classpath)
-    * [distcp](#distcp)
-    * [job](#job)
-    * [pipes](#pipes)
-    * [queue](#queue)
-    * [version](#version)
-* [Administration Commands](#Administration_Commands)
-    * [historyserver](#historyserver)
-    * [hsadmin](#hsadmin)
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
 
 Overview
 --------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
index 3cfa39d..5ea0567 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/PluggableShuffleAndPluggableSort.md
@@ -15,6 +15,8 @@
 Hadoop: Pluggable Shuffle and Pluggable Sort
 ============================================
 
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
 Introduction
 ------------
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3619ae32/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
index 8b92ed6..4369a31 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
@@ -15,27 +15,7 @@
 MapReduce History Server REST API's.
 ====================================
 
-* [MapReduce History Server REST API's.](#MapReduce_History_Server_REST_APIs.)
-    * [Overview](#Overview)
-    * [History Server Information API](#History_Server_Information_API)
-        * [URI](#URI)
-        * [HTTP Operations Supported](#HTTP_Operations_Supported)
-        * [Query Parameters Supported](#Query_Parameters_Supported)
-        * [Elements of the historyInfo object](#Elements_of_the_historyInfo_object)
-        * [Response Examples](#Response_Examples)
-    * [MapReduce API's](#MapReduce_APIs)
-        * [Jobs API](#Jobs_API)
-        * [Job API](#Job_API)
-        * [Elements of the acls object](#Elements_of_the_acls_object)
-        * [Job Attempts API](#Job_Attempts_API)
-        * [Job Counters API](#Job_Counters_API)
-        * [Job Conf API](#Job_Conf_API)
-        * [Tasks API](#Tasks_API)
-        * [Task API](#Task_API)
-        * [Task Counters API](#Task_Counters_API)
-        * [Task Attempts API](#Task_Attempts_API)
-        * [Task Attempt API](#Task_Attempt_API)
-        * [Task Attempt Counters API](#Task_Attempt_Counters_API)
+<!-- MACRO{toc|fromDepth=0|toDepth=2} -->
 
 Overview
 --------




[04/23] hadoop git commit: HADOOP-14018. shaded jars of hadoop-client modules are missing hadoop's root LICENSE and NOTICE files. Contributed by Elek, Marton.

Posted by jh...@apache.org.
HADOOP-14018. shaded jars of hadoop-client modules are missing hadoop's root LICENSE and NOTICE files. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/258991dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/258991dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/258991dc

Branch: refs/heads/YARN-5734
Commit: 258991dc5a3b13640834d1c8803f2eb840f8afea
Parents: 4c6bae5
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Jan 31 11:38:34 2017 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Jan 31 11:38:34 2017 -0800

----------------------------------------------------------------------
 hadoop-client-modules/hadoop-client-api/pom.xml | 12 +++++++++--
 .../hadoop-client-minicluster/pom.xml           | 22 ++++++++++++++++++--
 .../hadoop-client-runtime/pom.xml               | 16 ++++++++++++--
 3 files changed, 44 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/258991dc/hadoop-client-modules/hadoop-client-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index 4c8bcc6..76610cd 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -204,8 +204,16 @@
                     <!-- Needed until MSHADE-182 -->
                     <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
-                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
-                        <addHeader>false</addHeader>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resource>NOTICE.txt</resource>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
                     </transformer>
                   </transformers>
                 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/258991dc/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 83d2748..c58ac38 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -731,8 +731,26 @@
                     <!-- Needed until MSHADE-182 -->
                     <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
-                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
-                      <addHeader>false</addHeader>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resources>
+                        <resource>LICENSE</resource>
+                        <resource>LICENSE.txt</resource>
+                        <resource>NOTICE</resource>
+                        <resource>NOTICE.txt</resource>
+                        <resource>Grizzly_THIRDPARTYLICENSEREADME.txt</resource>
+                        <resource>LICENSE.dom-documentation.txt</resource>
+                        <resource>LICENSE.dom-software.txt</resource>
+                        <resource>LICENSE.dom-documentation.txt</resource>
+                        <resource>LICENSE.sax.txt</resource>
+                      </resources>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
                     </transformer>
                   </transformers>
                 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/258991dc/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index cff3329..151191c 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -292,8 +292,20 @@
                     <!-- Needed until MSHADE-182 -->
                     <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
-                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
-                      <addHeader>false</addHeader>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resources>
+                        <resource>NOTICE.txt</resource>
+                        <resource>NOTICE</resource>
+                        <resource>LICENSE</resource>
+                      </resources>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
                     </transformer>
                     <transformer implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
                       <resource>META-INF/jboss-beans.xml</resource>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/23] hadoop git commit: HADOOP-14047. Require admin to access KMS instrumentation servlets. Contributed by John Zhuge.

Posted by jh...@apache.org.
HADOOP-14047. Require admin to access KMS instrumentation servlets. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d88497d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d88497d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d88497d4

Branch: refs/heads/YARN-5734
Commit: d88497d44a7c34ae4cf0295c89b3584d834057d5
Parents: 663e683
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Feb 6 13:14:17 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Feb 6 13:14:17 2017 -0800

----------------------------------------------------------------------
 .../crypto/key/kms/server/KMSConfiguration.java |  2 ++
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 10 ------
 .../crypto/key/kms/server/KMSWebServer.java     |  3 ++
 .../src/main/resources/kms-default.xml          | 14 ++++++++
 .../hadoop-kms/src/site/markdown/index.md.vm    | 38 ++++++++++++++++++--
 5 files changed, 55 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d88497d4/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 1ef6c4e..cf02dd1 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -48,6 +48,8 @@ public class KMSConfiguration {
   public static final int HTTP_PORT_DEFAULT = 9600;
   public static final String HTTP_HOST_KEY = "hadoop.kms.http.host";
   public static final String HTTP_HOST_DEFAULT = "0.0.0.0";
+  public static final String HTTP_ADMINS_KEY =
+      "hadoop.kms.http.administrators";
 
   // SSL properties
   public static final String SSL_ENABLED_KEY = "hadoop.kms.ssl.enabled";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d88497d4/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index 857139f..ac24105 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -34,9 +34,7 @@ import org.apache.hadoop.crypto.key.CachingKeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
-import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
@@ -144,14 +142,6 @@ public class KMSWebApp implements ServletContextListener {
 
       kmsAudit = new KMSAudit(kmsConf);
 
-      // this is required for the the JMXJsonServlet to work properly.
-      // the JMXJsonServlet is behind the authentication filter,
-      // thus the '*' ACL.
-      sce.getServletContext().setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,
-          kmsConf);
-      sce.getServletContext().setAttribute(HttpServer2.ADMINS_ACL,
-          new AccessControlList(AccessControlList.WILDCARD_ACL_VALUE));
-
       // intializing the KeyProvider
       String providerString = kmsConf.get(KMSConfiguration.KEY_PROVIDER_URI);
       if (providerString == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d88497d4/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
index 02c4a42..1141824 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.ConfigurationWithLogging;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
@@ -84,6 +85,8 @@ public class KMSWebServer {
         .setConf(conf)
         .setSSLConf(sslConf)
         .authFilterConfigurationPrefix(KMSAuthenticationFilter.CONFIG_PREFIX)
+        .setACL(new AccessControlList(conf.get(
+            KMSConfiguration.HTTP_ADMINS_KEY, " ")))
         .addEndpoint(endpoint)
         .build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d88497d4/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
index 2b178b8..7055f2d 100644
--- a/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/resources/kms-default.xml
@@ -38,6 +38,20 @@
   </property>
 
   <property>
+    <name>hadoop.kms.http.administrators</name>
+    <value></value>
+    <description>ACL for the admins. This configuration is used to control
+      who can access the default KMS servlets. The value should be a
+      comma-separated list of users and groups. The user list comes first,
+      followed by a space and then the group list,
+      e.g. "user1,user2 group1,group2". Both users and groups are optional,
+      so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2"
+      are all valid (note the leading space in " group1"). '*' grants access
+      to all users and groups, e.g. '*', '* ' and ' *' are all valid.
+    </description>
+  </property>
+
+  <property>
     <name>hadoop.kms.ssl.enabled</name>
     <value>false</value>
     <description>
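
For readers unfamiliar with the ACL string format described in the new property above, the following stand-alone Java sketch (not part of the patch; the class name is invented) shows how Hadoop's AccessControlList, the class KMSWebServer now feeds this value into, interprets a few representative values:

import org.apache.hadoop.security.authorize.AccessControlList;

public class KmsAdminAclSketch {
  public static void main(String[] args) {
    // Users come before the single space, groups after it.
    AccessControlList acl = new AccessControlList("user1,user2 group1,group2");
    System.out.println("users:  " + acl.getUsers());   // user1, user2
    System.out.println("groups: " + acl.getGroups());  // group1, group2

    // A lone "*" opens the servlets to everyone.
    System.out.println(new AccessControlList("*").isAllAllowed());   // true

    // A single space (the default KMSWebServer falls back to) yields
    // empty user and group lists, i.e. no wildcard access.
    System.out.println(new AccessControlList(" ").isAllAllowed());   // false
  }
}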

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d88497d4/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 09284e5..7b4b518 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -1063,13 +1063,13 @@ configuration properties instead.
 
 Environment Variable     | Configuration Property       | Configuration File
 -------------------------|------------------------------|--------------------
+KMS_TEMP                 | hadoop.http.temp.dir         | kms-site.xml
 KMS_HTTP_PORT            | hadoop.kms.http.port         | kms-site.xml
 KMS_MAX_HTTP_HEADER_SIZE | hadoop.http.max.request.header.size and hadoop.http.max.response.header.size | kms-site.xml
 KMS_MAX_THREADS          | hadoop.http.max.threads      | kms-site.xml
 KMS_SSL_ENABLED          | hadoop.kms.ssl.enabled       | kms-site.xml
 KMS_SSL_KEYSTORE_FILE    | ssl.server.keystore.location | ssl-server.xml
 KMS_SSL_KEYSTORE_PASS    | ssl.server.keystore.password | ssl-server.xml
-KMS_TEMP                 | hadoop.http.temp.dir         | kms-site.xml
 
 $H3 Default HTTP Services
 
@@ -1080,4 +1080,38 @@ Name               | Description
 /logLevel          | Get or set log level per class
 /logs              | Display log files
 /stacks            | Display JVM stacks
-/static/index.html | The static home page
\ No newline at end of file
+/static/index.html | The static home page
+
+To control access to the servlets `/conf`, `/jmx`, `/logLevel`, `/logs`,
+and `/stacks`, configure the following properties in `kms-site.xml`:
+
+```xml
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>true</value>
+    <description>Is service-level authorization enabled?</description>
+  </property>
+
+  <property>
+    <name>hadoop.security.instrumentation.requires.admin</name>
+    <value>true</value>
+    <description>
+      Indicates if administrator ACLs are required to access
+      instrumentation servlets (JMX, METRICS, CONF, STACKS).
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.http.administrators</name>
+    <value></value>
+    <description>ACL for the admins. This configuration is used to control
+      who can access the default KMS servlets. The value should be a
+      comma-separated list of users and groups. The user list comes first,
+      followed by a space and then the group list,
+      e.g. "user1,user2 group1,group2". Both users and groups are optional,
+      so "user1", " group1", "", "user1 group1", "user1,user2 group1,group2"
+      are all valid (note the leading space in " group1"). '*' grants access
+      to all users and groups, e.g. '*', '* ' and ' *' are all valid.
+    </description>
+  </property>
+```
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/23] hadoop git commit: HDFS-11363. Need more diagnosis info when seeing Slow waitForAckedSeqno.

Posted by jh...@apache.org.
HDFS-11363. Need more diagnosis info when seeing Slow waitForAckedSeqno.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0914fcca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0914fcca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0914fcca

Branch: refs/heads/YARN-5734
Commit: 0914fcca312b5e9d20bcf1b6633bc13c9034ba46
Parents: 3433f57
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Feb 2 10:08:27 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Thu Feb 2 10:08:27 2017 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java      | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0914fcca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index ef5d21a..8e6eb63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -865,8 +865,9 @@ class DataStreamer extends Daemon {
       }
       long duration = Time.monotonicNow() - begin;
       if (duration > dfsclientSlowLogThresholdMs) {
-        LOG.warn("Slow waitForAckedSeqno took " + duration
-            + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
+        LOG.warn("Slow waitForAckedSeqno took {}ms (threshold={}ms). File being"
+                + " written: {}, block: {}, Write pipeline datanodes: {}.",
+            duration, dfsclientSlowLogThresholdMs, src, block, nodes);
       }
     }
   }
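
As an aside (not from the patch), the parameterized SLF4J style used here defers placeholder substitution and the arguments' toString() calls until the WARN message is actually emitted, unlike plain string concatenation. A minimal stand-alone sketch, with an invented class name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jParamSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jParamSketch.class);

  public static void main(String[] args) {
    long duration = 35000L;
    long threshold = 30000L;
    // No concatenation at the call site; SLF4J fills the {} slots on demand.
    LOG.warn("Slow operation took {}ms (threshold={}ms)", duration, threshold);
  }
}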


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/23] hadoop git commit: MAPREDUCE-6338. MR AppMaster does not honor ephemeral port range. Contributed by Frank Nguyen.

Posted by jh...@apache.org.
MAPREDUCE-6338. MR AppMaster does not honor ephemeral port range. Contributed by Frank Nguyen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d401e63b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d401e63b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d401e63b

Branch: refs/heads/YARN-5734
Commit: d401e63b6c3695d1f8f3f4958b8d592b15342b17
Parents: 3ea6d35
Author: Junping Du <ju...@apache.org>
Authored: Sun Feb 5 19:28:01 2017 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Sun Feb 5 19:28:01 2017 -0800

----------------------------------------------------------------------
 .../hadoop/mapred/TaskAttemptListenerImpl.java     | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d401e63b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
index 2c0ea2b..5669f3e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
@@ -134,15 +134,14 @@ public class TaskAttemptListenerImpl extends CompositeService
   protected void startRpcServer() {
     Configuration conf = getConfig();
     try {
-      server = 
-          new RPC.Builder(conf).setProtocol(TaskUmbilicalProtocol.class)
-            .setInstance(this).setBindAddress("0.0.0.0")
-            .setPort(0).setNumHandlers(
-                conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, 
-                    MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT))
-                    .setVerbose(false).setSecretManager(jobTokenSecretManager)
-                    .build();
-      
+      server = new RPC.Builder(conf).setProtocol(TaskUmbilicalProtocol.class)
+          .setInstance(this).setBindAddress("0.0.0.0")
+          .setPortRangeConfig(MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE)
+          .setNumHandlers(
+          conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, 
+          MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT))
+          .setVerbose(false).setSecretManager(jobTokenSecretManager).build();
+
       // Enable service authorization?
       if (conf.getBoolean(
           CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
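
With this change the task listener binds within the same port range configured for the job client, rather than an arbitrary ephemeral port chosen by setPort(0). A hypothetical snippet (not part of the patch; the range value is illustrative only) showing how such a range could be supplied:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class AmPortRangeSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // MR_AM_JOB_CLIENT_PORT_RANGE is the key the builder above now consults.
    conf.set(MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE, "50100-50200");
    System.out.println(MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE + " = "
        + conf.get(MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE));
  }
}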


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[02/23] hadoop git commit: YARN-5866. Fix few issues reported by jshint in new YARN UI. Contributed by Akhil P B.

Posted by jh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js
index 1c5b7b3..766c5c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js
@@ -49,7 +49,7 @@ export default DS.JSONAPISerializer.extend({
           relationshipUserData.push({
             type: "YarnUser",
             id: u.username + "_" + payload.queueName,
-          })
+          });
         });
       }
 
@@ -85,12 +85,12 @@ export default DS.JSONAPISerializer.extend({
       return {
         queue: this._super(store, primaryModelClass, fixedPayload, id, requestType),
         includedData: includedData
-      }
+      };
     },
 
     handleQueue(store, primaryModelClass, payload, id, requestType) {
       var data = [];
-      var includedData = []
+      var includedData = [];
       var result = this.normalizeSingleResponse(store, primaryModelClass,
         payload, id, requestType);
 
@@ -112,8 +112,8 @@ export default DS.JSONAPISerializer.extend({
 
       return {
         data: data,
-        includedData, includedData
-      }
+        includedData: includedData
+      };
     },
 
     normalizeArrayResponse(store, primaryModelClass, payload, id,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-rm-node.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-rm-node.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-rm-node.js
index 6feab36..ad50621 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-rm-node.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-rm-node.js
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
 import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
@@ -48,16 +47,14 @@ export default DS.JSONAPISerializer.extend({
     return fixedPayload;
   },
 
-  normalizeSingleResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  normalizeSingleResponse(store, primaryModelClass, payload, id/*, requestType*/) {
     // payload is of the form {"nodeInfo":{}}
     var p = this.internalNormalizeSingleResponse(store,
         primaryModelClass, payload, id);
     return { data: p };
   },
 
-  normalizeArrayResponse(store, primaryModelClass, payload, id,
-      requestType) {
+  normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
     // expected response is of the form { data: [ {}, {} ] }
     var normalizedArrayResponse = {};
     if (payload.nodes) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js
index b96ec16..6c0cfee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js
@@ -16,8 +16,6 @@
  * limitations under the License.
  */
 
-import Constants from 'yarn-ui/constants';
-
 export default {
   preDefinedColors : ["#1f77b4", "#aec7e8", "#ffbb78",
     "#98df8a", "#ff9896", "#9467bd", "#c5b0d5", "#8c564b",
@@ -43,14 +41,14 @@ export default {
       startIdx = Math.max(nColors - colorsTarget.length, 0);
     }
 
-    for (var i = 0; i < colorsTarget.length; i++) {
+    for (i = 0; i < colorsTarget.length; i++) {
       if (i + startIdx < nColors) {
         colors[i + startIdx] = this.getColorByTarget(colorsTarget[i]);
       }
     }
 
     var idx = 0;
-    for (var i = 0; i < nColors; i++) {
+    for (i = 0; i < nColors; i++) {
       if (!colors[i]) {
         colors[i] = this.preDefinedColors[i % this.preDefinedColors.length];
         idx ++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
index fb6b61c..448dd18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
@@ -110,7 +110,7 @@ export default {
     }
   },
   memoryToSimpliedUnit: function(mb) {
-    var unit = "MB"
+    var unit = "MB";
     var value = mb;
     if (value / 1024 >= 0.9) {
       value = value / 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
index fd940a2..e36de4b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
@@ -16,8 +16,6 @@
  * limitations under the License.
  */
 
-import Constants from 'yarn-ui/constants';
-
 export default {
   getApplicationLink: function(applicationId) {
     return "#/yarn-app/" + applicationId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/mock.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/mock.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/mock.js
index 62eebc1..3112acc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/mock.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/mock.js
@@ -32,5 +32,5 @@ export default {
     }
 
     ref.set("model", data);
-  },
-}
\ No newline at end of file
+  }
+};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/sorter.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/sorter.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/sorter.js
index febef6f..c986798 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/sorter.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/sorter.js
@@ -51,19 +51,19 @@ export default {
  */
 function naturalSort(a, b) {
   var diff = a.length - b.length;
-  if (diff != 0) {
+  if (diff !== 0) {
     var splitA = a.split("_");
     var splitB = b.split("_");
-    if (splitA.length != splitB.length) {
+    if (splitA.length !== splitB.length) {
       return a.localeCompare(b);
     }
     for (var i = 1; i < splitA.length; i++) {
       var splitdiff = splitA[i].length - splitB[i].length;
-      if (splitdiff != 0) {
+      if (splitdiff !== 0) {
         return splitdiff;
       }
       var splitCompare = splitA[i].localeCompare(splitB[i]);
-      if (splitCompare != 0) {
+      if (splitCompare !== 0) {
         return splitCompare;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index 7736c75..b75a2e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -22,7 +22,7 @@ var EmberApp = require('ember-cli/lib/broccoli/ember-app');
 
 module.exports = function(defaults) {
   var app = new EmberApp(defaults, {
-    hinting: false
+    hinting: true
   });
 
   app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-container-log-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-container-log-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-container-log-test.js
index 45808a5..93cdec6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-container-log-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-container-log-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleForModel, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleForModel('yarn-container-log', 'Unit | Model | ContainerLog', {
   // Specify the other units that are required for this test.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-app-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-app-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-app-test.js
index 7e2e62f..3b91ea7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-app-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-app-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleForModel, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleForModel('yarn-node-app', 'Unit | Model | NodeApp', {
   // Specify the other units that are required for this test.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-container-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-container-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-container-test.js
index 88bf233..24ad4c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-container-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-container-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleForModel, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleForModel('yarn-node-container', 'Unit | Model | NodeContainer', {
   // Specify the other units that are required for this test.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-test.js
index 5877589..8d7c831 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-node-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleForModel, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleForModel('yarn-node', 'Unit | Model | Node', {
   // Specify the other units that are required for this test.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-rm-node-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-rm-node-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-rm-node-test.js
index 4fd2517..2499ebc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-rm-node-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-rm-node-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleForModel, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleForModel('yarn-rm-node', 'Unit | Model | RMNode', {
   // Specify the other units that are required for this test.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
index 4e68da0..d0a78dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-container-log-test.js
@@ -17,7 +17,7 @@
  */
 
 import { moduleFor, test } from 'ember-qunit';
-import Constants from 'yarn-ui/constants';
+import Ember from 'ember';
 
 moduleFor('route:yarn-container-log', 'Unit | Route | ContainerLog', {
 });
@@ -34,11 +34,11 @@ test('Test getting container log', function(assert) {
       containerID: "container_e32_1456000363780_0002_01_000001",
       logFileName: "syslog"};
   var store = {
-    findRecord: function(type) {
+    findRecord: function() {
       return new Ember.RSVP.Promise(function(resolve) {
         resolve(response);
-      }
-    )}
+      });
+    }
   };
   assert.expect(6);
   var route = this.subject();
@@ -67,11 +67,11 @@ test('Test non HTTP error while getting container log', function(assert) {
       containerID: "container_e32_1456000363780_0002_01_000001",
       logFileName: "syslog"};
   var store = {
-    findRecord: function(type) {
+    findRecord: function() {
       return new Ember.RSVP.Promise(function(resolve, reject) {
         reject(error);
-      }
-    )}
+      });
+    }
   };
   assert.expect(6);
   var route = this.subject();
@@ -92,16 +92,12 @@ test('Test non HTTP error while getting container log', function(assert) {
 
 test('Test HTTP error while getting container log', function(assert) {
   var error = {errors: [{status: 404, responseText: 'Not Found'}]};
-  var response = {
-      logs: "",
-      containerID: "container_e32_1456000363780_0002_01_000001",
-      logFileName: "syslog"};
   var store = {
-    findRecord: function(type) {
+    findRecord: function() {
       return new Ember.RSVP.Promise(function(resolve, reject) {
         reject(error);
-      }
-    )}
+      });
+    }
   };
   assert.expect(5);
   var route = this.subject();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
index 8e5acf9..a4f787e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-app-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleFor, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleFor('route:yarn-node-app', 'Unit | Route | NodeApp', {
 });
@@ -31,7 +32,7 @@ test('Test getting specific app on a node', function(assert) {
   var response =
       {id:"application_1456251210105_0001", state:"FINISHED", user:"root"};
   var store = {
-    queryRecord: function(type, query) {
+    queryRecord: function() {
       return new Ember.RSVP.Promise(function(resolve) {
         resolve(response);
       });
@@ -40,17 +41,16 @@ test('Test getting specific app on a node', function(assert) {
   assert.expect(6);
   var route = this.subject();
   route.set('store', store);
-  var model =
-      route.model({node_id:"localhost:64318", node_addr:"localhost:8042",
-          app_id:"application_1456251210105_0001"}).
-      then(
-        function(value){
-          assert.ok(value);
-          assert.ok(value.nodeApp);
-          assert.deepEqual(value.nodeApp, response);
-          assert.ok(value.nodeInfo);
-          assert.equal(value.nodeInfo.addr, 'localhost:8042');
-          assert.equal(value.nodeInfo.id, 'localhost:64318');
-        }
-      );
+  route.model({node_id:"localhost:64318", node_addr:"localhost:8042",
+      app_id:"application_1456251210105_0001"})
+    .then(
+      function(value){
+        assert.ok(value);
+        assert.ok(value.nodeApp);
+        assert.deepEqual(value.nodeApp, response);
+        assert.ok(value.nodeInfo);
+        assert.equal(value.nodeInfo.addr, 'localhost:8042');
+        assert.equal(value.nodeInfo.id, 'localhost:64318');
+      }
+    );
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-apps-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-apps-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-apps-test.js
index 44d9995..8666ca5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-apps-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-apps-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleFor, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleFor('route:yarn-node-apps', 'Unit | Route | NodeApps', {
 });
@@ -34,7 +35,7 @@ test('Test getting apps on a node', function(assert) {
       containerids:["container_e38_1456251210105_0002_01_000001",
       "container_e38_1456251210105_0002_01_000002"]}];
   var store = {
-    query: function(type, query) {
+    query: function() {
       return new Ember.RSVP.Promise(function(resolve) {
         resolve(response.slice());
       });
@@ -43,18 +44,17 @@ test('Test getting apps on a node', function(assert) {
   assert.expect(8);
   var route = this.subject();
   route.set('store', store);
-  var model =
-      route.model({node_id:"localhost:64318", node_addr:"localhost:8042"}).
-      then(
-        function(value){
-          assert.ok(value);
-          assert.ok(value.apps);
-          assert.equal(value.apps.length, 2);
-          assert.deepEqual(response[0], value.apps[0]);
-          assert.deepEqual(response[1], value.apps[1]);
-          assert.ok(value.nodeInfo);
-          assert.equal(value.nodeInfo.addr, 'localhost:8042');
-          assert.equal(value.nodeInfo.id, 'localhost:64318');
-        }
-      );
+  route.model({node_id:"localhost:64318", node_addr:"localhost:8042"})
+    .then(
+      function(value){
+        assert.ok(value);
+        assert.ok(value.apps);
+        assert.equal(value.apps.length, 2);
+        assert.deepEqual(response[0], value.apps[0]);
+        assert.deepEqual(response[1], value.apps[1]);
+        assert.ok(value.nodeInfo);
+        assert.equal(value.nodeInfo.addr, 'localhost:8042');
+        assert.equal(value.nodeInfo.id, 'localhost:64318');
+      }
+    );
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-container-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-container-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-container-test.js
index f0b68fc..f304d42 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-container-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-container-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleFor, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleFor('route:yarn-node-container', 'Unit | Route | NodeContainer', {
 });
@@ -36,7 +37,7 @@ test('Test getting specific container on a node', function(assert) {
       nodeId: "localhost:64318", containerLogFiles:["syslog","stderr",
       "stdout"]};
   var store = {
-    queryRecord: function(type, query) {
+    queryRecord: function() {
       return new Ember.RSVP.Promise(function(resolve) {
         resolve(response);
       });
@@ -45,17 +46,16 @@ test('Test getting specific container on a node', function(assert) {
   assert.expect(6);
   var route = this.subject();
   route.set('store', store);
-  var model =
-      route.model({node_id:"localhost:64318", node_addr:"localhost:8042",
-          container_id:"container_e32_1456000363780_0002_01_000001"}).
-      then(
-        function(value){
-          assert.ok(value);
-          assert.ok(value.nodeContainer);
-          assert.deepEqual(value.nodeContainer, response);
-          assert.ok(value.nodeInfo);
-          assert.equal(value.nodeInfo.addr, 'localhost:8042');
-          assert.equal(value.nodeInfo.id, 'localhost:64318');
-        }
-      );
+  route.model({node_id:"localhost:64318", node_addr:"localhost:8042",
+        container_id:"container_e32_1456000363780_0002_01_000001"})
+    .then(
+      function(value){
+        assert.ok(value);
+        assert.ok(value.nodeContainer);
+        assert.deepEqual(value.nodeContainer, response);
+        assert.ok(value.nodeInfo);
+        assert.equal(value.nodeInfo.addr, 'localhost:8042');
+        assert.equal(value.nodeInfo.id, 'localhost:64318');
+      }
+    );
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-containers-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-containers-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-containers-test.js
index 8359713..5c25ca2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-containers-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-containers-test.js
@@ -17,6 +17,7 @@
  */
 
 import { moduleFor, test } from 'ember-qunit';
+import Ember from 'ember';
 
 moduleFor('route:yarn-node-containers', 'Unit | Route | NodeContainers', {
 });
@@ -42,7 +43,7 @@ test('Test getting apps on a node', function(assert) {
       nodeId:"localhost:64318",containerLogFiles:["syslog","stderr",
       "syslog.shuffle","stdout"]}];
   var store = {
-    query: function(type, query) {
+    query: function() {
       return new Ember.RSVP.Promise(function(resolve) {
         resolve(response.slice());
       });
@@ -51,18 +52,17 @@ test('Test getting apps on a node', function(assert) {
   assert.expect(8);
   var route = this.subject();
   route.set('store', store);
-  var model =
-      route.model({node_id:"localhost:64318", node_addr:"localhost:8042"}).
-      then(
-        function(value){
-          assert.ok(value);
-          assert.ok(value.containers);
-          assert.equal(value.containers.length, 2);
-          assert.deepEqual(value.containers[0], response[0]);
-          assert.deepEqual(value.containers[1], response[1]);
-          assert.ok(value.nodeInfo);
-          assert.equal(value.nodeInfo.addr, 'localhost:8042');
-          assert.equal(value.nodeInfo.id, 'localhost:64318');
-        }
-      );
+  route.model({node_id:"localhost:64318", node_addr:"localhost:8042"})
+    .then(
+      function(value){
+        assert.ok(value);
+        assert.ok(value.containers);
+        assert.equal(value.containers.length, 2);
+        assert.deepEqual(value.containers[0], response[0]);
+        assert.deepEqual(value.containers[1], response[1]);
+        assert.ok(value.nodeInfo);
+        assert.equal(value.nodeInfo.addr, 'localhost:8042');
+        assert.equal(value.nodeInfo.id, 'localhost:64318');
+      }
+    );
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-test.js
index 4e82f1b..d0b0553 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-node-test.js
@@ -61,11 +61,11 @@ test('Test getting a node', function(assert) {
   // Create store which returns appropriate responses.
   var store = {
     findRecord: function(type) {
-      if (type == 'yarnNode') {
+      if (type === 'yarnNode') {
         return new Ember.RSVP.Promise(function(resolve) {
           resolve(nodeResponse);
         });
-      } else if (type == 'yarnRmNode') {
+      } else if (type === 'yarnRmNode') {
         return new Ember.RSVP.Promise(function(resolve) {
           resolve(rmNodeResponse);
         });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6bae5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-nodes-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-nodes-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-nodes-test.js
index baa5bd6..8a1ce59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-nodes-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-nodes-test.js
@@ -57,7 +57,7 @@ test('Test getting nodes', function(assert) {
         containersCPUUsage: 0
       }}];
   var store = {
-    findAll: function(type) {
+    findAll: function() {
       return new Ember.RSVP.Promise(function(resolve) {
         resolve(response);
       });


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/23] hadoop git commit: HDFS-11370. Optimize NamenodeFsck#getReplicaInfo. Contributed by Takanobu Asanuma.

Posted by jh...@apache.org.
HDFS-11370. Optimize NamenodeFsck#getReplicaInfo. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6f290d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6f290d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6f290d5

Branch: refs/heads/YARN-5734
Commit: b6f290d5b660ad157c7076767c619d02b3d0f894
Parents: 59c5f18
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Feb 1 11:21:35 2017 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Feb 1 11:21:35 2017 -0800

----------------------------------------------------------------------
 .../BlockUnderConstructionFeature.java          | 25 +++++++++++++++++
 .../hdfs/server/namenode/NamenodeFsck.java      | 28 +++++++++++---------
 2 files changed, 41 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6f290d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
index b935f43..7453184 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
@@ -24,7 +24,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
+import java.util.NoSuchElementException;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.COMPLETE;
 
@@ -110,6 +112,29 @@ public class BlockUnderConstructionFeature {
   }
 
   /**
+   * Note that this iterator does not guarantee thread-safety. It depends on
+   * external mechanisms such as the FSNamesystem lock for protection.
+   */
+  public Iterator<DatanodeStorageInfo> getExpectedStorageLocationsIterator() {
+    return new Iterator<DatanodeStorageInfo>() {
+      private int index = 0;
+
+      @Override
+      public boolean hasNext() {
+        return index <  replicas.length;
+      }
+
+      @Override
+      public DatanodeStorageInfo next() {
+        if (!hasNext()) {
+          throw new NoSuchElementException();
+        }
+        return replicas[index++].getExpectedStorageLocation();
+      }
+    };
+  }
+
+  /**
    * @return the index array indicating the block index in each storage. Used
    * only by striped blocks.
    */
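
The new accessor walks the replicas array lazily rather than copying it into a DatanodeStorageInfo[] just so fsck can print each location once. A generic, stand-alone sketch of the same index-based iterator pattern (illustrative only; all names invented):

import java.util.Iterator;
import java.util.NoSuchElementException;

public class ArrayIteratorSketch {
  static <T> Iterator<T> over(final T[] items) {
    return new Iterator<T>() {
      private int index = 0;

      @Override
      public boolean hasNext() {
        return index < items.length;
      }

      @Override
      public T next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        return items[index++];
      }
    };
  }

  public static void main(String[] args) {
    Iterator<String> it = over(new String[] {"storage-1", "storage-2"});
    while (it.hasNext()) {
      System.out.println(it.next());
    }
  }
}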

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6f290d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 859f541..7e91935 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -589,23 +589,27 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       return "";
     }
     final boolean isComplete = storedBlock.isComplete();
-    DatanodeStorageInfo[] storages = isComplete ?
-        blockManager.getStorages(storedBlock) :
-        storedBlock.getUnderConstructionFeature().getExpectedStorageLocations();
+    Iterator<DatanodeStorageInfo> storagesItr;
     StringBuilder sb = new StringBuilder(" [");
     final boolean isStriped = storedBlock.isStriped();
     Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
-    if (isStriped && isComplete) {
-      long blockId = storedBlock.getBlockId();
-      Iterable<StorageAndBlockIndex> sis =
-          ((BlockInfoStriped)storedBlock).getStorageAndIndexInfos();
-      for (StorageAndBlockIndex si: sis){
-        storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
+    if (isComplete) {
+      if (isStriped) {
+        long blockId = storedBlock.getBlockId();
+        Iterable<StorageAndBlockIndex> sis =
+            ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
+        for (StorageAndBlockIndex si : sis) {
+          storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
+        }
       }
+      storagesItr = storedBlock.getStorageInfos();
+    } else {
+      storagesItr = storedBlock.getUnderConstructionFeature()
+          .getExpectedStorageLocationsIterator();
     }
 
-    for (int i = 0; i < storages.length; i++) {
-      DatanodeStorageInfo storage = storages[i];
+    while (storagesItr.hasNext()) {
+      DatanodeStorageInfo storage = storagesItr.next();
       if (isStriped && isComplete) {
         long index = storage2Id.get(storage);
         sb.append("blk_" + index + ":");
@@ -649,7 +653,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
           sb.append("LIVE)");
         }
       }
-      if (i < storages.length - 1) {
+      if (storagesItr.hasNext()) {
         sb.append(", ");
       }
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[12/23] hadoop git commit: YARN-6100. Improve YARN webservice to output aggregated container logs. Contributed by Xuan Gong.

Posted by jh...@apache.org.
YARN-6100. Improve YARN webservice to output aggregated container logs. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/327c9980
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/327c9980
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/327c9980

Branch: refs/heads/YARN-5734
Commit: 327c9980aafce52cc02d2b8885fc4e9f628ab23c
Parents: 2a942ee
Author: Junping Du <ju...@apache.org>
Authored: Thu Feb 2 00:41:18 2017 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Thu Feb 2 00:41:18 2017 -0800

----------------------------------------------------------------------
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  |  17 --
 .../yarn/logaggregation/LogToolUtils.java       | 158 ++++++++++++++
 .../webapp/AHSWebServices.java                  | 210 ++++---------------
 .../webapp/TestAHSWebServices.java              |  29 ++-
 .../nodemanager/webapp/NMWebServices.java       |  93 +++++---
 .../nodemanager/webapp/TestNMWebServices.java   |  59 ++++--
 6 files changed, 332 insertions(+), 234 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/327c9980/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 1de4cd1..3cb1c7d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -64,7 +64,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
 import org.apache.hadoop.yarn.logaggregation.PerContainerLogFileInfo;
-import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
@@ -509,17 +508,9 @@ public class LogsCLI extends Configured implements Tool {
       newOptions.setLogTypes(matchedFiles);
 
       Client webServiceClient = Client.create();
-      String containerString = String.format(
-          LogCLIHelpers.CONTAINER_ON_NODE_PATTERN, containerIdStr, nodeId);
-      out.println(containerString);
-      out.println(StringUtils.repeat("=", containerString.length()));
       boolean foundAnyLogs = false;
       byte[] buffer = new byte[65536];
       for (String logFile : newOptions.getLogTypes()) {
-        out.println("LogType:" + logFile);
-        out.println("Log Upload Time:"
-            + Times.format(System.currentTimeMillis()));
-        out.println("Log Contents:");
         InputStream is = null;
         try {
           ClientResponse response = getResponeFromNMWebService(conf,
@@ -541,14 +532,6 @@ public class LogsCLI extends Configured implements Tool {
                 response.getEntity(String.class));
             out.println(msg);
           }
-          StringBuilder sb = new StringBuilder();
-          sb.append("End of LogType:" + logFile + ".");
-          if (request.getContainerState() == ContainerState.RUNNING) {
-            sb.append(" This log file belongs"
-                + " to a running container (" + containerIdStr + ") and so may"
-                + " not be complete.");
-          }
-          out.println(sb.toString());
           out.flush();
           foundAnyLogs = true;
         } catch (ClientHandlerException | UniformInterfaceException ex) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/327c9980/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
index e117736..d83a8ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
@@ -20,11 +20,17 @@ package org.apache.hadoop.yarn.logaggregation;
 import java.io.DataInputStream;
 import java.io.EOFException;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.math3.util.Pair;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.HarFs;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
@@ -40,6 +46,9 @@ public final class LogToolUtils {
 
   private LogToolUtils() {}
 
+  public static final String CONTAINER_ON_NODE_PATTERN =
+      "Container: %s on %s";
+
   /**
    * Return a list of {@link ContainerLogMeta} for a container
    * from Remote FileSystem.
@@ -114,4 +123,153 @@ public final class LogToolUtils {
     }
     return containersLogMeta;
   }
+
+  /**
+   * Output container log.
+   * @param containerId the containerId
+   * @param nodeId the nodeId
+   * @param fileName the log file name
+   * @param fileLength the log file length
+   * @param outputSize the output size
+   * @param lastModifiedTime the log file last modified time
+   * @param fis the log file input stream
+   * @param os the output stream
+   * @param buf the buffer
+   * @param logType the log type.
+   * @throws IOException if we can not access the log file.
+   */
+  public static void outputContainerLog(String containerId, String nodeId,
+      String fileName, long fileLength, long outputSize,
+      String lastModifiedTime, InputStream fis, OutputStream os,
+      byte[] buf, ContainerLogType logType) throws IOException {
+    long toSkip = 0;
+    long totalBytesToRead = fileLength;
+    long skipAfterRead = 0;
+    if (outputSize < 0) {
+      long absBytes = Math.abs(outputSize);
+      if (absBytes < fileLength) {
+        toSkip = fileLength - absBytes;
+        totalBytesToRead = absBytes;
+      }
+      org.apache.hadoop.io.IOUtils.skipFully(fis, toSkip);
+    } else {
+      if (outputSize < fileLength) {
+        totalBytesToRead = outputSize;
+        skipAfterRead = fileLength - outputSize;
+      }
+    }
+
+    long curRead = 0;
+    long pendingRead = totalBytesToRead - curRead;
+    int toRead = pendingRead > buf.length ? buf.length
+        : (int) pendingRead;
+    int len = fis.read(buf, 0, toRead);
+    boolean keepGoing = (len != -1 && curRead < totalBytesToRead);
+    if (keepGoing) {
+      StringBuilder sb = new StringBuilder();
+      String containerStr = String.format(
+          LogToolUtils.CONTAINER_ON_NODE_PATTERN,
+          containerId, nodeId);
+      sb.append(containerStr + "\n");
+      sb.append("LogType: " + logType + "\n");
+      sb.append(StringUtils.repeat("=", containerStr.length()) + "\n");
+      sb.append("FileName:" + fileName + "\n");
+      sb.append("LogLastModifiedTime:" + lastModifiedTime + "\n");
+      sb.append("LogLength:" + Long.toString(fileLength) + "\n");
+      sb.append("LogContents:\n");
+      byte[] b = sb.toString().getBytes(
+          Charset.forName("UTF-8"));
+      os.write(b, 0, b.length);
+    }
+    while (keepGoing) {
+      os.write(buf, 0, len);
+      curRead += len;
+
+      pendingRead = totalBytesToRead - curRead;
+      toRead = pendingRead > buf.length ? buf.length
+          : (int) pendingRead;
+      len = fis.read(buf, 0, toRead);
+      keepGoing = (len != -1 && curRead < totalBytesToRead);
+    }
+    org.apache.hadoop.io.IOUtils.skipFully(fis, skipAfterRead);
+    os.flush();
+  }
+
+  public static boolean outputAggregatedContainerLog(Configuration conf,
+      ApplicationId appId, String appOwner,
+      String containerId, String nodeId,
+      String logFileName, long outputSize, OutputStream os,
+      byte[] buf) throws IOException {
+    boolean findLogs = false;
+    RemoteIterator<FileStatus> nodeFiles = LogAggregationUtils
+        .getRemoteNodeFileDir(conf, appId, appOwner);
+    while (nodeFiles != null && nodeFiles.hasNext()) {
+      final FileStatus thisNodeFile = nodeFiles.next();
+      String nodeName = thisNodeFile.getPath().getName();
+      if (nodeName.equals(appId + ".har")) {
+        Path p = new Path("har:///"
+            + thisNodeFile.getPath().toUri().getRawPath());
+        nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
+        continue;
+      }
+      if ((nodeId == null || nodeName.contains(LogAggregationUtils
+          .getNodeString(nodeId))) && !nodeName.endsWith(
+              LogAggregationUtils.TMP_FILE_SUFFIX)) {
+        AggregatedLogFormat.LogReader reader = null;
+        try {
+          reader = new AggregatedLogFormat.LogReader(conf,
+              thisNodeFile.getPath());
+          DataInputStream valueStream;
+          LogKey key = new LogKey();
+          valueStream = reader.next(key);
+          while (valueStream != null && !key.toString()
+              .equals(containerId)) {
+            // Next container
+            key = new LogKey();
+            valueStream = reader.next(key);
+          }
+          if (valueStream == null) {
+            continue;
+          }
+          while (true) {
+            try {
+              String fileType = valueStream.readUTF();
+              String fileLengthStr = valueStream.readUTF();
+              long fileLength = Long.parseLong(fileLengthStr);
+              if (fileType.equalsIgnoreCase(logFileName)) {
+                LogToolUtils.outputContainerLog(containerId,
+                    nodeId, fileType, fileLength, outputSize,
+                    Times.format(thisNodeFile.getModificationTime()),
+                    valueStream, os, buf, ContainerLogType.AGGREGATED);
+                StringBuilder sb = new StringBuilder();
+                String endOfFile = "End of LogFile:" + fileType;
+                sb.append("\n" + endOfFile + "\n");
+                sb.append(StringUtils.repeat("*", endOfFile.length() + 50)
+                    + "\n\n");
+                byte[] b = sb.toString().getBytes(Charset.forName("UTF-8"));
+                os.write(b, 0, b.length);
+                findLogs = true;
+              } else {
+                long totalSkipped = 0;
+                long currSkipped = 0;
+                while (currSkipped != -1 && totalSkipped < fileLength) {
+                  currSkipped = valueStream.skip(
+                      fileLength - totalSkipped);
+                  totalSkipped += currSkipped;
+                }
+              }
+            } catch (EOFException eof) {
+              break;
+            }
+          }
+        } finally {
+          if (reader != null) {
+            reader.close();
+          }
+        }
+      }
+    }
+    os.flush();
+    return findLogs;
+  }
 }
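
The new outputContainerLog helper interprets a positive outputSize as "first N bytes" and a negative one as "last N bytes", skipping ahead before a bounded copy loop in the tail case. A self-contained sketch of just that windowing logic over a plain InputStream; the class and method names are illustrative, and the skipAfterRead handling needed for aggregated streams is omitted:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    public class LogWindowSketch {
      // Copy the first |size| bytes (size >= 0) or the last |size| bytes
      // (size < 0) of a stream whose total length is known up front.
      static void copyWindow(InputStream in, OutputStream out,
          long fileLength, long size) throws IOException {
        long totalBytesToRead = fileLength;
        if (size < 0) {
          long absBytes = Math.abs(size);
          if (absBytes < fileLength) {
            // Skip the head and keep only the tail. The real helper uses
            // IOUtils.skipFully to guarantee the skip completes.
            in.skip(fileLength - absBytes);
            totalBytesToRead = absBytes;
          }
        } else if (size < fileLength) {
          totalBytesToRead = size;  // keep only the head
        }
        byte[] buf = new byte[8192];
        long curRead = 0;
        while (curRead < totalBytesToRead) {
          int toRead = (int) Math.min(buf.length, totalBytesToRead - curRead);
          int len = in.read(buf, 0, toRead);
          if (len == -1) {
            break;
          }
          out.write(buf, 0, len);
          curRead += len;
        }
        out.flush();
      }

      public static void main(String[] args) throws IOException {
        byte[] data = "0123456789".getBytes("UTF-8");
        ByteArrayOutputStream head = new ByteArrayOutputStream();
        copyWindow(new ByteArrayInputStream(data), head, data.length, 4);
        ByteArrayOutputStream tail = new ByteArrayOutputStream();
        copyWindow(new ByteArrayInputStream(data), tail, data.length, -4);
        System.out.println(head + " / " + tail);  // prints "0123 / 6789"
      }
    }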

http://git-wip-us.apache.org/repos/asf/hadoop/blob/327c9980/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 9bac474..a10bfac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
-import java.io.DataInputStream;
-import java.io.EOFException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.charset.Charset;
@@ -43,12 +41,10 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -56,13 +52,9 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
 import org.apache.hadoop.yarn.logaggregation.ContainerLogMeta;
 import org.apache.hadoop.yarn.logaggregation.ContainerLogType;
-import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.logaggregation.LogToolUtils;
-import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
@@ -71,11 +63,11 @@ import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerLogsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
-import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
@@ -360,27 +352,27 @@ public class AHSWebServices extends WebServices {
     } catch (Exception ex) {
       // directly find logs from HDFS.
       return sendStreamOutputResponse(appId, null, null, containerIdStr,
-          filename, format, length);
+          filename, format, length, false);
     }
     String appOwner = appInfo.getUser();
+    if (isFinishedState(appInfo.getAppState())) {
+      // directly find logs from HDFS.
+      return sendStreamOutputResponse(appId, appOwner, null, containerIdStr,
+          filename, format, length, false);
+    }
 
-    ContainerInfo containerInfo;
-    try {
-      containerInfo = super.getContainer(
-          req, res, appId.toString(),
-          containerId.getApplicationAttemptId().toString(),
-          containerId.toString());
-    } catch (Exception ex) {
-      if (isFinishedState(appInfo.getAppState())) {
-        // directly find logs from HDFS.
+    if (isRunningState(appInfo.getAppState())) {
+      ContainerInfo containerInfo;
+      try {
+        containerInfo = super.getContainer(
+            req, res, appId.toString(),
+            containerId.getApplicationAttemptId().toString(),
+            containerId.toString());
+      } catch (Exception ex) {
+        // output the aggregated logs
         return sendStreamOutputResponse(appId, appOwner, null, containerIdStr,
-            filename, format, length);
+            filename, format, length, true);
       }
-      return createBadResponse(Status.INTERNAL_SERVER_ERROR,
-          "Can not get ContainerInfo for the container: " + containerId);
-    }
-    String nodeId = containerInfo.getNodeId();
-    if (isRunningState(appInfo.getAppState())) {
       String nodeHttpAddress = containerInfo.getNodeHttpAddress();
       String uri = "/" + containerId.toString() + "/logs/" + filename;
       String resURI = JOINER.join(nodeHttpAddress, NM_DOWNLOAD_URI_STR, uri);
@@ -392,9 +384,6 @@ public class AHSWebServices extends WebServices {
           HttpServletResponse.SC_TEMPORARY_REDIRECT);
       response.header("Location", resURI);
       return response.build();
-    } else if (isFinishedState(appInfo.getAppState())) {
-      return sendStreamOutputResponse(appId, appOwner, nodeId,
-          containerIdStr, filename, format, length);
     } else {
       return createBadResponse(Status.NOT_FOUND,
           "The application is not at Running or Finished State.");
@@ -419,7 +408,8 @@ public class AHSWebServices extends WebServices {
 
   private Response sendStreamOutputResponse(ApplicationId appId,
       String appOwner, String nodeId, String containerIdStr,
-      String fileName, String format, long bytes) {
+      String fileName, String format, long bytes,
+      boolean printEmptyLocalContainerLog) {
     String contentType = WebAppUtils.getDefaultLogContentType();
     if (format != null && !format.isEmpty()) {
       contentType = WebAppUtils.getSupportedLogContentType(format);
@@ -433,15 +423,11 @@ public class AHSWebServices extends WebServices {
     StreamingOutput stream = null;
     try {
       stream = getStreamingOutput(appId, appOwner, nodeId,
-          containerIdStr, fileName, bytes);
+          containerIdStr, fileName, bytes, printEmptyLocalContainerLog);
     } catch (Exception ex) {
       return createBadResponse(Status.INTERNAL_SERVER_ERROR,
           ex.getMessage());
     }
-    if (stream == null) {
-      return createBadResponse(Status.INTERNAL_SERVER_ERROR,
-          "Can not get log for container: " + containerIdStr);
-    }
     ResponseBuilder response = Response.ok(stream);
     response.header("Content-Type", contentType);
     // Sending the X-Content-Type-Options response header with the value
@@ -451,146 +437,30 @@ public class AHSWebServices extends WebServices {
     return response.build();
   }
 
-  private StreamingOutput getStreamingOutput(ApplicationId appId,
-      String appOwner, final String nodeId, final String containerIdStr,
-      final String logFile, final long bytes) throws IOException{
-    String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
-    org.apache.hadoop.fs.Path remoteRootLogDir = new org.apache.hadoop.fs.Path(
-        conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
-            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
-    org.apache.hadoop.fs.Path qualifiedRemoteRootLogDir =
-        FileContext.getFileContext(conf).makeQualified(remoteRootLogDir);
-    FileContext fc = FileContext.getFileContext(
-        qualifiedRemoteRootLogDir.toUri(), conf);
-    org.apache.hadoop.fs.Path remoteAppDir = null;
-    if (appOwner == null) {
-      org.apache.hadoop.fs.Path toMatch = LogAggregationUtils
-          .getRemoteAppLogDir(remoteRootLogDir, appId, "*", suffix);
-      FileStatus[] matching  = fc.util().globStatus(toMatch);
-      if (matching == null || matching.length != 1) {
-        return null;
-      }
-      remoteAppDir = matching[0].getPath();
-    } else {
-      remoteAppDir = LogAggregationUtils
-          .getRemoteAppLogDir(remoteRootLogDir, appId, appOwner, suffix);
-    }
-    final RemoteIterator<FileStatus> nodeFiles;
-    nodeFiles = fc.listStatus(remoteAppDir);
-    if (!nodeFiles.hasNext()) {
-      return null;
-    }
-
+  private StreamingOutput getStreamingOutput(final ApplicationId appId,
+      final String appOwner, final String nodeId, final String containerIdStr,
+      final String logFile, final long bytes,
+      final boolean printEmptyLocalContainerLog) throws IOException{
     StreamingOutput stream = new StreamingOutput() {
 
       @Override
       public void write(OutputStream os) throws IOException,
           WebApplicationException {
         byte[] buf = new byte[65535];
-        boolean findLogs = false;
-        while (nodeFiles.hasNext()) {
-          final FileStatus thisNodeFile = nodeFiles.next();
-          String nodeName = thisNodeFile.getPath().getName();
-          if ((nodeId == null || nodeName.contains(LogAggregationUtils
-              .getNodeString(nodeId))) && !nodeName.endsWith(
-              LogAggregationUtils.TMP_FILE_SUFFIX)) {
-            AggregatedLogFormat.LogReader reader = null;
-            try {
-              reader = new AggregatedLogFormat.LogReader(conf,
-                  thisNodeFile.getPath());
-              DataInputStream valueStream;
-              LogKey key = new LogKey();
-              valueStream = reader.next(key);
-              while (valueStream != null && !key.toString()
-                  .equals(containerIdStr)) {
-                // Next container
-                key = new LogKey();
-                valueStream = reader.next(key);
-              }
-              if (valueStream == null) {
-                continue;
-              }
-              while (true) {
-                try {
-                  String fileType = valueStream.readUTF();
-                  String fileLengthStr = valueStream.readUTF();
-                  long fileLength = Long.parseLong(fileLengthStr);
-                  if (fileType.equalsIgnoreCase(logFile)) {
-                    StringBuilder sb = new StringBuilder();
-                    sb.append("LogType:");
-                    sb.append(fileType + "\n");
-                    sb.append("Log Upload Time:");
-                    sb.append(Times.format(System.currentTimeMillis()) + "\n");
-                    sb.append("LogLength:");
-                    sb.append(fileLengthStr + "\n");
-                    sb.append("Log Contents:\n");
-                    byte[] b = sb.toString().getBytes(
-                        Charset.forName("UTF-8"));
-                    os.write(b, 0, b.length);
-
-                    long toSkip = 0;
-                    long totalBytesToRead = fileLength;
-                    long skipAfterRead = 0;
-                    if (bytes < 0) {
-                      long absBytes = Math.abs(bytes);
-                      if (absBytes < fileLength) {
-                        toSkip = fileLength - absBytes;
-                        totalBytesToRead = absBytes;
-                      }
-                      org.apache.hadoop.io.IOUtils.skipFully(
-                          valueStream, toSkip);
-                    } else {
-                      if (bytes < fileLength) {
-                        totalBytesToRead = bytes;
-                        skipAfterRead = fileLength - bytes;
-                      }
-                    }
-
-                    long curRead = 0;
-                    long pendingRead = totalBytesToRead - curRead;
-                    int toRead = pendingRead > buf.length ? buf.length
-                        : (int) pendingRead;
-                    int len = valueStream.read(buf, 0, toRead);
-                    while (len != -1 && curRead < totalBytesToRead) {
-                      os.write(buf, 0, len);
-                      curRead += len;
-
-                      pendingRead = totalBytesToRead - curRead;
-                      toRead = pendingRead > buf.length ? buf.length
-                          : (int) pendingRead;
-                      len = valueStream.read(buf, 0, toRead);
-                    }
-                    org.apache.hadoop.io.IOUtils.skipFully(
-                        valueStream, skipAfterRead);
-                    sb = new StringBuilder();
-                    sb.append("\nEnd of LogType:" + fileType + "\n");
-                    b = sb.toString().getBytes(Charset.forName("UTF-8"));
-                    os.write(b, 0, b.length);
-                    findLogs = true;
-                  } else {
-                    long totalSkipped = 0;
-                    long currSkipped = 0;
-                    while (currSkipped != -1 && totalSkipped < fileLength) {
-                      currSkipped = valueStream.skip(
-                          fileLength - totalSkipped);
-                      totalSkipped += currSkipped;
-                    }
-                  }
-                } catch (EOFException eof) {
-                  break;
-                }
-              }
-            } finally {
-              if (reader != null) {
-                reader.close();
-              }
-            }
-          }
-        }
-        os.flush();
+        boolean findLogs = LogToolUtils.outputAggregatedContainerLog(conf,
+            appId, appOwner, containerIdStr, nodeId, logFile, bytes, os, buf);
         if (!findLogs) {
           throw new IOException("Can not find logs for container:"
               + containerIdStr);
+        } else {
+          if (printEmptyLocalContainerLog) {
+            StringBuilder sb = new StringBuilder();
+            sb.append(containerIdStr + "\n");
+            sb.append("LogType: " + ContainerLogType.LOCAL + "\n");
+            sb.append("LogContents:\n");
+            sb.append(getNoRedirectWarning() + "\n");
+            os.write(sb.toString().getBytes(Charset.forName("UTF-8")));
+          }
         }
       }
     };
@@ -640,4 +510,12 @@ public class AHSWebServices extends WebServices {
       throw new WebApplicationException(ex);
     }
   }
+
+  @Private
+  @VisibleForTesting
+  public static String getNoRedirectWarning() {
+    return "We do not have NodeManager web address, so we can not "
+        + "re-direct the request to related NodeManager "
+        + "for local container logs.";
+  }
 }
\ No newline at end of file
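
With the log-reading loop moved into LogToolUtils, what remains in AHSWebServices is essentially a thin JAX-RS StreamingOutput wrapper around that call. A minimal sketch of the wrapping pattern, assuming a made-up resource path and placeholder writer body rather than the actual AHS endpoint:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.WebApplicationException;
    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.Response;
    import javax.ws.rs.core.StreamingOutput;

    @Path("/example/logs")
    public class StreamingLogResource {
      @GET
      @Produces(MediaType.TEXT_PLAIN)
      public Response getLog() {
        // The entity body is produced lazily, chunk by chunk, when the JAX-RS
        // container serializes the response; getStreamingOutput() relies on
        // the same mechanism to avoid buffering whole log files in memory.
        StreamingOutput stream = new StreamingOutput() {
          @Override
          public void write(OutputStream os) throws IOException,
              WebApplicationException {
            // A real implementation would pull bytes from the aggregated log
            // reader here; this sketch just emits a placeholder line.
            os.write("log contents would stream here\n"
                .getBytes(StandardCharsets.UTF_8));
            os.flush();
          }
        };
        return Response.ok(stream)
            .header("Content-Type", MediaType.TEXT_PLAIN)
            .build();
      }
    }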

http://git-wip-us.apache.org/repos/asf/hadoop/blob/327c9980/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
index f553bb0..3d1c901 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -35,7 +35,7 @@ import javax.servlet.FilterConfig;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
-
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -584,7 +584,10 @@ public class TestAHSWebServices extends JerseyTestBase {
     responseText = response.getEntity(String.class);
     assertTrue(responseText.contains("Hello." + containerId1ForApp100));
     int fullTextSize = responseText.getBytes().length;
-    int tailTextSize = "\nEnd of LogType:syslog\n".getBytes().length;
+    String tailEndSeparator = StringUtils.repeat("*",
+        "End of LogFile:syslog".length() + 50) + "\n\n";
+    int tailTextSize = "\nEnd of LogFile:syslog\n".getBytes().length
+        + tailEndSeparator.getBytes().length;
 
     String logMessage = "Hello." + containerId1ForApp100;
     int fileContentSize = logMessage.getBytes().length;
@@ -685,6 +688,28 @@ public class TestAHSWebServices extends JerseyTestBase {
     assertTrue(redirectURL.contains(containerId1.toString()));
     assertTrue(redirectURL.contains("/logs/" + fileName));
     assertTrue(redirectURL.contains("user.name=" + user));
+
+    // If we can not get container information from ATS, we would try to
+    // get the aggregated log from the remote FileSystem.
+    ContainerId containerId1000 = ContainerId.newContainerId(
+        appAttemptId, 1000);
+    String content = "Hello." + containerId1000;
+    NodeId nodeId = NodeId.newInstance("test host", 100);
+    TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs,
+        rootLogDir, containerId1000, nodeId, fileName, user, content, true);
+    r = resource();
+    ClientResponse response = r.path("ws").path("v1")
+        .path("applicationhistory").path("containerlogs")
+        .path(containerId1000.toString()).path(fileName)
+        .queryParam("user.name", user)
+        .accept(MediaType.TEXT_PLAIN)
+        .get(ClientResponse.class);
+    String responseText = response.getEntity(String.class);
+    assertTrue(responseText.contains(content));
+    // Also test whether we output the empty local container log, and give
+    // the warning message.
+    assertTrue(responseText.contains("LogType: " + ContainerLogType.LOCAL));
+    assertTrue(responseText.contains(AHSWebServices.getNoRedirectWarning()));
   }
 
   @Test(timeout = 10000)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/327c9980/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
index 1357d5a..07acd4b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
@@ -21,6 +21,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map.Entry;
@@ -41,6 +42,9 @@ import javax.ws.rs.core.Response.Status;
 import javax.ws.rs.core.StreamingOutput;
 import javax.ws.rs.core.UriInfo;
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.http.JettyUtils;
@@ -57,6 +61,7 @@ import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo;
@@ -64,6 +69,7 @@ import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NMContainerLogsInfo;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerLogsInfo;
+import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
 import org.apache.hadoop.yarn.webapp.WebApp;
@@ -74,6 +80,7 @@ import com.google.inject.Singleton;
 @Singleton
 @Path("/ws/v1/node")
 public class NMWebServices {
+  private static final Log LOG = LogFactory.getLog(NMWebServices.class);
   private Context nmContext;
   private ResourceView rview;
   private WebApp webapp;
@@ -330,17 +337,32 @@ public class NMWebServices {
   @Produces({ MediaType.TEXT_PLAIN + "; " + JettyUtils.UTF_8 })
   @Public
   @Unstable
-  public Response getLogs(@PathParam("containerid") String containerIdStr,
+  public Response getLogs(
+      @PathParam("containerid") final String containerIdStr,
       @PathParam("filename") String filename,
       @QueryParam("format") String format,
       @QueryParam("size") String size) {
-    ContainerId containerId;
+    ContainerId tempContainerId;
     try {
-      containerId = ContainerId.fromString(containerIdStr);
+      tempContainerId = ContainerId.fromString(containerIdStr);
     } catch (IllegalArgumentException ex) {
       return Response.status(Status.BAD_REQUEST).build();
     }
-    
+    final ContainerId containerId = tempContainerId;
+    boolean tempIsRunning = false;
+    // check what is the status for container
+    try {
+      Container container = nmContext.getContainers().get(containerId);
+      tempIsRunning = (container.getContainerState() == ContainerState.RUNNING);
+    } catch (Exception ex) {
+      // This NM does not have this container any more. We
+      // assume the container has already finished.
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Can not find the container:" + containerId
+            + " in this node.");
+      }
+    }
+    final boolean isRunning = tempIsRunning;
     File logFile = null;
     try {
       logFile = ContainerLogsUtils.getContainerLogFile(
@@ -351,6 +373,8 @@ public class NMWebServices {
       return Response.serverError().entity(ex.getMessage()).build();
     }
     final long bytes = parseLongParam(size);
+    final String lastModifiedTime = Times.format(logFile.lastModified());
+    final String outputFileName = filename;
     String contentType = WebAppUtils.getDefaultLogContentType();
     if (format != null && !format.isEmpty()) {
       contentType = WebAppUtils.getSupportedLogContentType(format);
@@ -374,39 +398,40 @@ public class NMWebServices {
           try {
             int bufferSize = 65536;
             byte[] buf = new byte[bufferSize];
-            long toSkip = 0;
-            long totalBytesToRead = fileLength;
-            long skipAfterRead = 0;
-            if (bytes < 0) {
-              long absBytes = Math.abs(bytes);
-              if (absBytes < fileLength) {
-                toSkip = fileLength - absBytes;
-                totalBytesToRead = absBytes;
-              }
-              org.apache.hadoop.io.IOUtils.skipFully(fis, toSkip);
+            LogToolUtils.outputContainerLog(containerId.toString(),
+                nmContext.getNodeId().toString(), outputFileName, fileLength,
+                bytes, lastModifiedTime, fis, os, buf, ContainerLogType.LOCAL);
+            StringBuilder sb = new StringBuilder();
+            String endOfFile = "End of LogFile:" + outputFileName;
+            sb.append(endOfFile + ".");
+            if (isRunning) {
+              sb.append("This log file belongs to a running container ("
+                  + containerIdStr + ") and so may not be complete." + "\n");
             } else {
-              if (bytes < fileLength) {
-                totalBytesToRead = bytes;
-                skipAfterRead = fileLength - bytes;
-              }
+              sb.append("\n");
             }
-
-            long curRead = 0;
-            long pendingRead = totalBytesToRead - curRead;
-            int toRead = pendingRead > buf.length ? buf.length
-                : (int) pendingRead;
-            int len = fis.read(buf, 0, toRead);
-            while (len != -1 && curRead < totalBytesToRead) {
-              os.write(buf, 0, len);
-              curRead += len;
-
-              pendingRead = totalBytesToRead - curRead;
-              toRead = pendingRead > buf.length ? buf.length
-                  : (int) pendingRead;
-              len = fis.read(buf, 0, toRead);
+            sb.append(StringUtils.repeat("*", endOfFile.length() + 50)
+                + "\n\n");
+            os.write(sb.toString().getBytes(Charset.forName("UTF-8")));
+            // If we have aggregated logs for this container,
+            // output the aggregation logs as well.
+            ApplicationId appId = containerId.getApplicationAttemptId()
+                .getApplicationId();
+            Application app = nmContext.getApplications().get(appId);
+            String appOwner = app == null ? null : app.getUser();
+            try {
+              LogToolUtils.outputAggregatedContainerLog(nmContext.getConf(),
+                  appId, appOwner, containerId.toString(),
+                  nmContext.getNodeId().toString(), outputFileName, bytes,
+                  os, buf);
+            } catch (Exception ex) {
+              // Something wrong when we try to access the aggregated log.
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Can not access the aggregated log for "
+                    + "the container:" + containerId);
+                LOG.debug(ex.getMessage());
+              }
             }
-            org.apache.hadoop.io.IOUtils.skipFully(fis, skipAfterRead);
-            os.flush();
           } finally {
             IOUtils.closeQuietly(fis);
           }
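
After this change the NM endpoint always streams the local log first and then appends the aggregated copy on a best-effort basis, downgrading any failure on the aggregated path to debug logging. A rough sketch of that ordering; the interface and helper names are placeholders, not the NodeManager API:

    import java.io.IOException;
    import java.io.OutputStream;

    public class LocalThenAggregatedSketch {
      interface LogSource {
        void writeTo(OutputStream os) throws IOException;
      }

      // Always emit the local log; treat the aggregated copy as optional.
      static void writeLogs(LogSource local, LogSource aggregated,
          OutputStream os) throws IOException {
        local.writeTo(os);
        try {
          aggregated.writeTo(os);
        } catch (IOException e) {
          // Aggregation may not have run yet, or the remote dir may be
          // missing; the response still succeeds with the local portion.
          System.err.println("debug: no aggregated log: " + e.getMessage());
        }
        os.flush();
      }
    }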

http://git-wip-us.apache.org/repos/asf/hadoop/blob/327c9980/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
index a6d4153..7764ceb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
@@ -384,8 +384,9 @@ public class TestNMWebServices extends JerseyTestBase {
     ClientResponse response = r.path(filename)
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     String responseText = response.getEntity(String.class);
-    assertEquals(logMessage, responseText);
-    int fullTextSize = responseText.getBytes().length;
+    String responseLogMessage = getLogContext(responseText);
+    assertEquals(logMessage, responseLogMessage);
+    int fullTextSize = responseLogMessage.getBytes().length;
 
     // specify how many bytes we should get from logs
     // specify a position number, it would get the first n bytes from
@@ -394,9 +395,10 @@ public class TestNMWebServices extends JerseyTestBase {
         .queryParam("size", "5")
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     responseText = response.getEntity(String.class);
-    assertEquals(5, responseText.getBytes().length);
-    assertEquals(new String(logMessage.getBytes(), 0, 5), responseText);
-    assertTrue(fullTextSize >= responseText.getBytes().length);
+    responseLogMessage = getLogContext(responseText);
+    assertEquals(5, responseLogMessage.getBytes().length);
+    assertEquals(new String(logMessage.getBytes(), 0, 5), responseLogMessage);
+    assertTrue(fullTextSize >= responseLogMessage.getBytes().length);
 
     // specify the bytes which is larger than the actual file size,
     // we would get the full logs
@@ -404,8 +406,9 @@ public class TestNMWebServices extends JerseyTestBase {
         .queryParam("size", "10000")
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     responseText = response.getEntity(String.class);
-    assertEquals(fullTextSize, responseText.getBytes().length);
-    assertEquals(logMessage, responseText);
+    responseLogMessage = getLogContext(responseText);
+    assertEquals(fullTextSize, responseLogMessage.getBytes().length);
+    assertEquals(logMessage, responseLogMessage);
 
     // specify a negative number, it would get the last n bytes from
     // container log
@@ -413,25 +416,28 @@ public class TestNMWebServices extends JerseyTestBase {
         .queryParam("size", "-5")
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     responseText = response.getEntity(String.class);
-    assertEquals(5, responseText.getBytes().length);
+    responseLogMessage = getLogContext(responseText);
+    assertEquals(5, responseLogMessage.getBytes().length);
     assertEquals(new String(logMessage.getBytes(),
-        logMessage.getBytes().length - 5, 5), responseText);
-    assertTrue(fullTextSize >= responseText.getBytes().length);
+        logMessage.getBytes().length - 5, 5), responseLogMessage);
+    assertTrue(fullTextSize >= responseLogMessage.getBytes().length);
 
     response = r.path(filename)
         .queryParam("size", "-10000")
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     responseText = response.getEntity(String.class);
+    responseLogMessage = getLogContext(responseText);
     assertEquals("text/plain; charset=utf-8", response.getType().toString());
-    assertEquals(fullTextSize, responseText.getBytes().length);
-    assertEquals(logMessage, responseText);
+    assertEquals(fullTextSize, responseLogMessage.getBytes().length);
+    assertEquals(logMessage, responseLogMessage);
 
     // ask and download it
     response = r.path(filename)
         .queryParam("format", "octet-stream")
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     responseText = response.getEntity(String.class);
-    assertEquals(logMessage, responseText);
+    responseLogMessage = getLogContext(responseText);
+    assertEquals(logMessage, responseLogMessage);
     assertEquals(200, response.getStatus());
     assertEquals("application/octet-stream; charset=utf-8",
         response.getType().toString());
@@ -475,10 +481,11 @@ public class TestNMWebServices extends JerseyTestBase {
         TestNMWebServices.class.getSimpleName() + "temp-log-dir");
     try {
       String aggregatedLogFile = filename + "-aggregated";
+      String aggregatedLogMessage = "This is aggregated log.";
       TestContainerLogsUtils.createContainerLogFileInRemoteFS(
           nmContext.getConf(), FileSystem.get(nmContext.getConf()),
           tempLogDir.getAbsolutePath(), containerId, nmContext.getNodeId(),
-          aggregatedLogFile, "user", logMessage, true);
+          aggregatedLogFile, "user", aggregatedLogMessage, true);
       r1 = resource();
       response = r1.path("ws").path("v1").path("node")
           .path("containers").path(containerIdStr)
@@ -501,6 +508,21 @@ public class TestNMWebServices extends JerseyTestBase {
           assertEquals(meta.get(0).getFileName(), filename);
         }
       }
+
+      // Test whether we could get aggregated log as well
+      TestContainerLogsUtils.createContainerLogFileInRemoteFS(
+          nmContext.getConf(), FileSystem.get(nmContext.getConf()),
+          tempLogDir.getAbsolutePath(), containerId, nmContext.getNodeId(),
+          filename, "user", aggregatedLogMessage, true);
+      response = r.path(filename)
+          .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
+      responseText = response.getEntity(String.class);
+      assertTrue(responseText.contains("LogType: "
+          + ContainerLogType.AGGREGATED));
+      assertTrue(responseText.contains(aggregatedLogMessage));
+      assertTrue(responseText.contains("LogType: "
+              + ContainerLogType.LOCAL));
+      assertTrue(responseText.contains(logMessage));
     } finally {
       FileUtil.fullyDelete(tempLogDir);
     }
@@ -511,7 +533,7 @@ public class TestNMWebServices extends JerseyTestBase {
         r.path(filename).accept(MediaType.TEXT_PLAIN)
             .get(ClientResponse.class);
     responseText = response.getEntity(String.class);
-    assertEquals(logMessage, responseText);
+    assertTrue(responseText.contains(logMessage));
   }
 
   public void verifyNodesXML(NodeList nodes) throws JSONException, Exception {
@@ -601,4 +623,11 @@ public class TestNMWebServices extends JerseyTestBase {
         YarnVersionInfo.getVersion(), resourceManagerVersion);
   }
 
+  private String getLogContext(String fullMessage) {
+    String prefix = "LogContents:\n";
+    String postfix = "End of LogFile:";
+    int prefixIndex = fullMessage.indexOf(prefix) + prefix.length();
+    int postfixIndex = fullMessage.indexOf(postfix);
+    return fullMessage.substring(prefixIndex, postfixIndex);
+  }
 }




[18/23] hadoop git commit: HDFS-11371. Document missing metrics of erasure coding. Contributed by Yiqun Lin.

Posted by jh...@apache.org.
HDFS-11371. Document missing metrics of erasure coding. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ea6d351
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ea6d351
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ea6d351

Branch: refs/heads/YARN-5734
Commit: 3ea6d3517e2f0448d91af73508ec9d10c4a3de21
Parents: cb8f3f3
Author: Yiqun Lin <yq...@apache.org>
Authored: Sat Feb 4 11:01:27 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Sat Feb 4 11:01:27 2017 +0800

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md                 | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ea6d351/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 32d081a..7900692 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -322,6 +322,12 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `RemoteBytesRead` | Number of bytes read by remote clients |
 | `RemoteBytesWritten` | Number of bytes written by remote clients |
 | `BPServiceActorInfo` | The information about a block pool service actor |
+| `EcReconstructionTasks` | Total number of erasure coding reconstruction tasks |
+| `EcFailedReconstructionTasks` | Total number of erasure coding failed reconstruction tasks |
+| `EcDecodingTimeNanos` | Total number of nanoseconds spent by decoding tasks |
+| `EcReconstructionBytesRead` | Total number of bytes read by erasure coding worker |
+| `EcReconstructionBytesWritten` | Total number of bytes written by erasure coding worker |
+| `EcReconstructionRemoteBytesRead` | Total number of bytes remote read by erasure coding worker |
 
 FsVolume
 --------




[17/23] hadoop git commit: HADOOP-14053. Update the link to HTrace SpanReceivers. Contributed by Yiqun Lin.

Posted by jh...@apache.org.
HADOOP-14053. Update the link to HTrace SpanReceivers. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb8f3f3a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb8f3f3a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb8f3f3a

Branch: refs/heads/YARN-5734
Commit: cb8f3f3adf7a22e8fb6fdfc44f3ec781c1526bfd
Parents: ba75bc7
Author: Akira Ajisaka <aa...@apache.org>
Authored: Sat Feb 4 10:58:08 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Sat Feb 4 10:58:08 2017 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb8f3f3a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
index 9b7084d..47b17a2 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
@@ -31,7 +31,7 @@ Setting up tracing is quite simple, however it requires some very minor changes
 
 The tracing system works by collecting information in structs called 'Spans'.
 It is up to you to choose how you want to receive this information
-by using implementation of [SpanReceiver](http://htrace.incubator.apache.org/#Span_Receivers)
+by using implementation of [SpanReceiver](http://htrace.incubator.apache.org/developer_guide.html#SpanReceivers)
 interface bundled with HTrace or implementing it by yourself.
 
 [HTrace](http://htrace.incubator.apache.org/) provides options such as




[20/23] hadoop git commit: HADOOP-12097. Allow port range to be specified while starting webapp. Contributed by Varun Saxena.

Posted by jh...@apache.org.
HADOOP-12097. Allow port range to be specified while starting webapp. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cce35c38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cce35c38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cce35c38

Branch: refs/heads/YARN-5734
Commit: cce35c38159b23eb55204b3c9afcaa3215f4f4ef
Parents: d401e63
Author: Junping Du <ju...@apache.org>
Authored: Sun Feb 5 19:42:11 2017 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Sun Feb 5 19:42:11 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Configuration.java   |  12 ++
 .../org/apache/hadoop/http/HttpServer2.java     | 119 ++++++++++++++++---
 .../org/apache/hadoop/http/TestHttpServer.java  |  38 ++++++
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |  38 ++++--
 .../apache/hadoop/yarn/webapp/TestWebApp.java   |  46 +++++++
 5 files changed, 227 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce35c38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index c0e42e5..bade06e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1887,6 +1887,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       return result.toString();
     }
 
+    /**
+     * Get range start for the first integer range.
+     * @return range start.
+     */
+    public int getRangeStart() {
+      if (ranges == null || ranges.isEmpty()) {
+        return -1;
+      }
+      Range r = ranges.get(0);
+      return r.start;
+    }
+
     @Override
     public Iterator<Integer> iterator() {
       return new RangeNumberIterator(ranges);
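
The new getRangeStart() accessor simply exposes the lower bound of the first configured range, or -1 when nothing is set. A tiny usage sketch, assuming the existing Configuration.getRange(name, defaultValue) accessor; the property key is made up for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configuration.IntegerRanges;

    public class RangeStartExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.webapp.port-range", "8080-8085");  // illustrative key
        IntegerRanges range = conf.getRange("example.webapp.port-range", "");
        // With this patch applied, prints 8080; an empty range yields -1.
        System.out.println(range.getRangeStart());
      }
    }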

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce35c38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 06f493b..25a4037 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
@@ -151,6 +152,7 @@ public final class HttpServer2 implements FilterContainer {
 
   protected final WebAppContext webAppContext;
   protected final boolean findPort;
+  protected final IntegerRanges portRanges;
   private final Map<ServletContextHandler, Boolean> defaultContexts =
       new HashMap<>();
   protected final List<String> filterNames = new ArrayList<>();
@@ -189,6 +191,7 @@ public final class HttpServer2 implements FilterContainer {
     private String keyPassword;
 
     private boolean findPort;
+    private IntegerRanges portRanges = null;
 
     private String hostName;
     private boolean disallowFallbackToRandomSignerSecretProvider;
@@ -261,6 +264,11 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    public Builder setPortRanges(IntegerRanges ranges) {
+      this.portRanges = ranges;
+      return this;
+    }
+
     public Builder setConf(Configuration conf) {
       this.conf = conf;
       return this;
@@ -496,6 +504,7 @@ public final class HttpServer2 implements FilterContainer {
     }
 
     this.findPort = b.findPort;
+    this.portRanges = b.portRanges;
     initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
   }
 
@@ -1080,6 +1089,93 @@ public final class HttpServer2 implements FilterContainer {
   }
 
   /**
+   * Bind listener by closing and opening the listener.
+   * @param listener
+   * @throws Exception
+   */
+  private static void bindListener(ServerConnector listener) throws Exception {
+    // jetty has a bug where you can't reopen a listener that previously
+    // failed to open w/o issuing a close first, even if the port is changed
+    listener.close();
+    listener.open();
+    LOG.info("Jetty bound to port " + listener.getLocalPort());
+  }
+
+  /**
+   * Create bind exception by wrapping the bind exception thrown.
+   * @param listener
+   * @param ex
+   * @return
+   */
+  private static BindException constructBindException(ServerConnector listener,
+      BindException ex) {
+    BindException be = new BindException("Port in use: "
+        + listener.getHost() + ":" + listener.getPort());
+    if (ex != null) {
+      be.initCause(ex);
+    }
+    return be;
+  }
+
+  /**
+   * Bind using single configured port. If findPort is true, we will try to bind
+   * after incrementing port till a free port is found.
+   * @param listener jetty listener.
+   * @param port port which is set in the listener.
+   * @throws Exception
+   */
+  private void bindForSinglePort(ServerConnector listener, int port)
+      throws Exception {
+    while (true) {
+      try {
+        bindListener(listener);
+        break;
+      } catch (BindException ex) {
+        if (port == 0 || !findPort) {
+          throw constructBindException(listener, ex);
+        }
+      }
+      // try the next port number
+      listener.setPort(++port);
+      Thread.sleep(100);
+    }
+  }
+
+  /**
+   * Bind using the configured port range. Try startPort first, then each
+   * remaining port in the range, and throw a bind exception if no port in
+   * the configured range can be bound.
+   * @param listener jetty listener.
+   * @param startPort initial port which is set in the listener.
+   * @throws Exception if no port in the configured range can be bound.
+   */
+  private void bindForPortRange(ServerConnector listener, int startPort)
+      throws Exception {
+    BindException bindException = null;
+    try {
+      bindListener(listener);
+      return;
+    } catch (BindException ex) {
+      // Ignore exception.
+      bindException = ex;
+    }
+    for (Integer port : portRanges) {
+      if (port == startPort) {
+        continue;
+      }
+      Thread.sleep(100);
+      listener.setPort(port);
+      try {
+        bindListener(listener);
+        return;
+      } catch (BindException ex) {
+        // Ignore exception. Move to next port.
+        bindException = ex;
+      }
+    }
+    throw constructBindException(listener, bindException);
+  }
+
+  /**
    * Open the main listener for the server
    * @throws Exception
    */
@@ -1091,25 +1187,10 @@ public final class HttpServer2 implements FilterContainer {
         continue;
       }
       int port = listener.getPort();
-      while (true) {
-        // jetty has a bug where you can't reopen a listener that previously
-        // failed to open w/o issuing a close first, even if the port is changed
-        try {
-          listener.close();
-          listener.open();
-          LOG.info("Jetty bound to port " + listener.getLocalPort());
-          break;
-        } catch (BindException ex) {
-          if (port == 0 || !findPort) {
-            BindException be = new BindException("Port in use: "
-                + listener.getHost() + ":" + listener.getPort());
-            be.initCause(ex);
-            throw be;
-          }
-        }
-        // try the next port number
-        listener.setPort(++port);
-        Thread.sleep(100);
+      if (portRanges != null && port != 0) {
+        bindForPortRange(listener, port);
+      } else {
+        bindForSinglePort(listener, port);
       }
     }
   }

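For reference, a minimal caller-side sketch of the new setPortRanges() builder API, mirroring the pattern in the tests below. The config key "hadoop.http.port.range" and the class name are hypothetical and used only for illustration.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.http.HttpServer2;

public class PortRangeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "hadoop.http.port.range" is a made-up key for this sketch only.
    conf.set("hadoop.http.port.range", "50000-50050");
    IntegerRanges ranges = conf.getRange("hadoop.http.port.range", "");
    HttpServer2.Builder builder = new HttpServer2.Builder()
        .setName("example").setConf(conf).setFindPort(false);
    int startPort = 0;
    if (ranges != null && !ranges.isEmpty()) {
      // Start from the first port of the range and let the server walk
      // the rest of the range if that port is taken.
      startPort = ranges.getRangeStart();
      builder.setPortRanges(ranges);
    }
    builder.addEndpoint(URI.create("http://localhost:" + startPort));
    HttpServer2 server = builder.build();
    server.start();
    System.out.println("Bound to port "
        + server.getConnectorAddress(0).getPort());
    server.stop();
  }
}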
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce35c38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index 6b87cd8..baa6f91 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.http;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.HttpServer2.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -644,4 +646,40 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     assertNotNull(conn.getHeaderField("Date"));
     assertEquals(conn.getHeaderField("Expires"), conn.getHeaderField("Date"));
   }
+
+  private static void stopHttpServer(HttpServer2 server) throws Exception {
+    if (server != null) {
+      server.stop();
+    }
+  }
+
+  @Test
+  public void testPortRanges() throws Exception {
+    Configuration conf = new Configuration();
+    int port = ServerSocketUtil.waitForPort(49000, 60);
+    int endPort = 49500;
+    conf.set("abc", "49000-49500");
+    HttpServer2.Builder builder = new HttpServer2.Builder()
+        .setName("test").setConf(new Configuration()).setFindPort(false);
+    IntegerRanges ranges = conf.getRange("abc", "");
+    int startPort = 0;
+    if (ranges != null && !ranges.isEmpty()) {
+      startPort = ranges.getRangeStart();
+      builder.setPortRanges(ranges);
+    }
+    builder.addEndpoint(URI.create("http://localhost:" + startPort));
+    HttpServer2 myServer = builder.build();
+    HttpServer2 myServer2 = null;
+    try {
+      myServer.start();
+      assertEquals(port, myServer.getConnectorAddress(0).getPort());
+      myServer2 = builder.build();
+      myServer2.start();
+      assertTrue(myServer2.getConnectorAddress(0).getPort() > port &&
+          myServer2.getConnectorAddress(0).getPort() <= endPort);
+    } finally {
+      stopHttpServer(myServer);
+      stopHttpServer(myServer2);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce35c38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 7ce0dfa..9c5e8c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -35,6 +35,7 @@ import javax.servlet.http.HttpServlet;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -92,6 +93,7 @@ public class WebApps {
     boolean findPort = false;
     Configuration conf;
     Policy httpPolicy = null;
+    String portRangeConfigKey = null;
     boolean devMode = false;
     private String spnegoPrincipalKey;
     private String spnegoKeytabKey;
@@ -157,6 +159,19 @@ public class WebApps {
       return this;
     }
 
+    /**
+     * Set port range config key and associated configuration object.
+     * @param config configuration.
+     * @param portRangeConfKey port range config key.
+     * @return builder object.
+     */
+    public Builder<T> withPortRange(Configuration config,
+        String portRangeConfKey) {
+      this.conf = config;
+      this.portRangeConfigKey = portRangeConfKey;
+      return this;
+    }
+
     public Builder<T> withHttpSpnegoPrincipalKey(String spnegoPrincipalKey) {
       this.spnegoPrincipalKey = spnegoPrincipalKey;
       return this;
@@ -265,15 +280,24 @@ public class WebApps {
                   : WebAppUtils.HTTP_PREFIX;
         }
         HttpServer2.Builder builder = new HttpServer2.Builder()
-            .setName(name)
-            .addEndpoint(
-                URI.create(httpScheme + bindAddress
-                    + ":" + port)).setConf(conf).setFindPort(findPort)
+            .setName(name).setConf(conf).setFindPort(findPort)
             .setACL(new AccessControlList(conf.get(
-              YarnConfiguration.YARN_ADMIN_ACL, 
-              YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)))
+                YarnConfiguration.YARN_ADMIN_ACL,
+                YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)))
             .setPathSpec(pathList.toArray(new String[0]));
-
+        // Get port ranges from config.
+        IntegerRanges ranges = null;
+        if (portRangeConfigKey != null) {
+          ranges = conf.getRange(portRangeConfigKey, "");
+        }
+        int startPort = port;
+        if (ranges != null && !ranges.isEmpty()) {
+          // Set port ranges if they are configured.
+          startPort = ranges.getRangeStart();
+          builder.setPortRanges(ranges);
+        }
+        builder.addEndpoint(URI.create(httpScheme + bindAddress +
+            ":" + startPort));
         boolean hasSpnegoConf = spnegoPrincipalKey != null
             && conf.get(spnegoPrincipalKey) != null && spnegoKeytabKey != null
             && conf.get(spnegoKeytabKey) != null;

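A short sketch of the intended caller pattern for withPortRange(), again mirroring the tests below; the config key "example.webapp.port-range" and the class name are hypothetical, and the actual daemon wiring is outside this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;

public class WebAppPortRangeExample {
  public static void main(String[] args) throws Exception {
    // "example.webapp.port-range" is a made-up key for this sketch only.
    Configuration conf = new Configuration();
    conf.set("example.webapp.port-range", "46000-46500");
    WebApp webApp = WebApps.$for("example", new Object())
        .withPortRange(conf, "example.webapp.port-range")
        .start();
    // The listener binds to the first free port in 46000-46500.
    System.out.println("Bound to port "
        + webApp.getListenerAddress().getPort());
    webApp.stop();
  }
}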
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce35c38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
index deef855..9454002 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
@@ -35,6 +35,8 @@ import java.net.HttpURLConnection;
 import java.net.URL;
 
 import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.webapp.view.HtmlPage;
 import org.apache.hadoop.yarn.webapp.view.JQueryUI;
@@ -307,6 +309,50 @@ public class TestWebApp {
     }
   }
 
+  private static void stopWebApp(WebApp app) {
+    if (app != null) {
+      app.stop();
+    }
+  }
+
+  @Test
+  public void testPortRanges() throws Exception {
+    WebApp app = WebApps.$for("test", this).start();
+    String baseUrl = baseUrl(app);
+    WebApp app1 = null;
+    WebApp app2 = null;
+    WebApp app3 = null;
+    WebApp app4 = null;
+    WebApp app5 = null;
+    try {
+      int port = ServerSocketUtil.waitForPort(48000, 60);
+      assertEquals("foo", getContent(baseUrl + "test/foo").trim());
+      app1 = WebApps.$for("test", this).at(port).start();
+      assertEquals(port, app1.getListenerAddress().getPort());
+      app2 = WebApps.$for("test", this).at("0.0.0.0", port, true).start();
+      assertTrue(app2.getListenerAddress().getPort() > port);
+      Configuration conf = new Configuration();
+      port = ServerSocketUtil.waitForPort(47000, 60);
+      app3 = WebApps.$for("test", this).at(port).withPortRange(conf, "abc").
+          start();
+      assertEquals(port, app3.getListenerAddress().getPort());
+      ServerSocketUtil.waitForPort(46000, 60);
+      conf.set("abc", "46000-46500");
+      app4 = WebApps.$for("test", this).at(port).withPortRange(conf, "abc").
+          start();
+      assertEquals(46000, app4.getListenerAddress().getPort());
+      app5 = WebApps.$for("test", this).withPortRange(conf, "abc").start();
+      assertTrue(app5.getListenerAddress().getPort() > 46000);
+    } finally {
+      stopWebApp(app);
+      stopWebApp(app1);
+      stopWebApp(app2);
+      stopWebApp(app3);
+      stopWebApp(app4);
+      stopWebApp(app5);
+    }
+  }
+
   static String baseUrl(WebApp app) {
     return "http://localhost:"+ app.port() +"/";
   }




[22/23] hadoop git commit: HDFS-10219. Change the default value for dfs.namenode.reconstruction.pending.timeout-sec from -1 to 300. Contributed by Yiqun Lin.

Posted by jh...@apache.org.
HDFS-10219. Change the default value for dfs.namenode.reconstruction.pending.timeout-sec from -1 to 300. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/663e683a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/663e683a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/663e683a

Branch: refs/heads/YARN-5734
Commit: 663e683adfbbbffeacdddcd846bd336c121df5c7
Parents: 9cbbd1e
Author: Yiqun Lin <yq...@apache.org>
Authored: Mon Feb 6 19:19:30 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Mon Feb 6 19:19:30 2017 +0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java      | 3 ++-
 .../hdfs/server/blockmanagement/PendingReconstructionBlocks.java | 4 +++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml              | 2 +-
 3 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/663e683a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 41c5aaa..10a521b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -219,7 +219,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY =
       "dfs.namenode.reconstruction.pending.timeout-sec";
-  public static final int     DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+  public static final int
+      DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = 300;
 
   public static final String  DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY =
       "dfs.namenode.maintenance.replication.min";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/663e683a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
index 6628c43..2221d1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
 import java.io.PrintWriter;
@@ -56,7 +57,8 @@ class PendingReconstructionBlocks {
   // It might take anywhere between 5 to 10 minutes before
   // a request is timed out.
   //
-  private long timeout = 5 * 60 * 1000;
+  private long timeout =
+      DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT * 1000;
   private final static long DEFAULT_RECHECK_INTERVAL = 5 * 60 * 1000;
 
   PendingReconstructionBlocks(long timeoutPeriod) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/663e683a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 966cb2f..2bbc788 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4036,7 +4036,7 @@
 
 <property>
   <name>dfs.namenode.reconstruction.pending.timeout-sec</name>
-  <value>-1</value>
+  <value>300</value>
   <description>
     Timeout in seconds for block reconstruction.  If this value is 0 or less,
     then it will default to 5 minutes.

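To make the behavior change concrete, a hedged sketch of pinning the timeout explicitly rather than relying on the new 300-second default; setting the property in hdfs-site.xml has the same effect, and the value 600 and class name here are only illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ReconstructionTimeoutExample {
  public static void main(String[] args) {
    // Explicitly choose a pending-reconstruction timeout; per the
    // hdfs-default.xml description above, a value of 0 or less still falls
    // back to 5 minutes.
    Configuration conf = new Configuration();
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 600);
    System.out.println("pending reconstruction timeout (sec): " + conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT));
  }
}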
