Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/30 03:27:31 UTC

[01/14] hadoop git commit: HADOOP-12175. FsShell must load SpanReceiverHost to support tracing (Masatake Iwasaki via Colin P. McCabe)

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 03335bb4d -> 42d0c0fac


HADOOP-12175. FsShell must load SpanReceiverHost to support tracing (Masatake Iwasaki via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69b09573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69b09573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69b09573

Branch: refs/heads/HADOOP-12111
Commit: 69b095730bd87706ddcbabeea6c513290ee4cdf5
Parents: f170934
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Jul 28 12:49:38 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue Jul 28 12:56:15 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../main/java/org/apache/hadoop/fs/FsShell.java | 11 ++++++++-
 .../java/org/apache/hadoop/fs/TestFsShell.java  | 24 ++++++++++++++++++++
 .../apache/hadoop/tracing/SetSpanReceiver.java  |  2 +-
 4 files changed, 38 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
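
For context, a minimal sketch (assuming the HTrace 3.x API this branch builds against) of how a client could exercise the new hook, closely following the TestFsShell#testTracing case added below. The class name, the "-ls /" invocation, and LocalFileSpanReceiver are illustrative assumptions, not part of this patch:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FsShell;
  import org.apache.hadoop.tracing.SpanReceiverHost;
  import org.apache.hadoop.util.ToolRunner;
  import org.apache.htrace.SamplerBuilder;
  import org.apache.htrace.impl.AlwaysSampler;

  public class TracedFsShellExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Receiver and sampler keys are scoped under the FsShell-specific prefix
      // ("dfs.shell.htrace.", matching the new constant added to FsShell).
      conf.set("dfs.shell.htrace." + SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
          "org.apache.htrace.impl.LocalFileSpanReceiver");  // assumed receiver class
      conf.set("dfs.shell.htrace." + SamplerBuilder.SAMPLER_CONF_KEY,
          AlwaysSampler.class.getName());
      FsShell shell = new FsShell(conf);
      int res;
      try {
        res = ToolRunner.run(shell, new String[] { "-ls", "/" });
      } finally {
        shell.close();  // also closes the SpanReceiverHost wired in by this patch
      }
      System.exit(res);
    }
  }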


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b09573/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index aeaa5b9..84e7076 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1020,6 +1020,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12245. References to misspelled REMAINING_QUATA in
     FileSystemShell.md. (Gabor Liptak via aajisaka)
 
+    HADOOP-12175. FsShell must load SpanReceierHost to support tracing
+    (Masatake Iwasaki via Colin P. McCabe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b09573/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 24b6339..e9c2f73 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.Tool;
@@ -57,6 +58,9 @@ public class FsShell extends Configured implements Tool {
   private final String usagePrefix =
     "Usage: hadoop fs [generic options]";
 
+  private SpanReceiverHost spanReceiverHost;
+  static final String SEHLL_HTRACE_PREFIX = "dfs.shell.htrace.";
+
   /**
    * Default ctor with no configuration.  Be sure to invoke
    * {@link #setConf(Configuration)} with a valid configuration prior
@@ -97,6 +101,8 @@ public class FsShell extends Configured implements Tool {
       commandFactory.addObject(new Usage(), "-usage");
       registerCommands(commandFactory);
     }
+    this.spanReceiverHost =
+        SpanReceiverHost.get(getConf(), SEHLL_HTRACE_PREFIX);
   }
 
   protected void registerCommands(CommandFactory factory) {
@@ -279,7 +285,7 @@ public class FsShell extends Configured implements Tool {
     // initialize FsShell
     init();
     traceSampler = new SamplerBuilder(TraceUtils.
-        wrapHadoopConf("dfs.shell.htrace.", getConf())).build();
+        wrapHadoopConf(SEHLL_HTRACE_PREFIX, getConf())).build();
     int exitCode = -1;
     if (argv.length < 1) {
       printUsage(System.err);
@@ -335,6 +341,9 @@ public class FsShell extends Configured implements Tool {
       fs.close();
       fs = null;
     }
+    if (this.spanReceiverHost != null) {
+      this.spanReceiverHost.closeReceivers();
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b09573/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
index 12c8bcf..38e768f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShell.java
@@ -18,6 +18,12 @@
 package org.apache.hadoop.fs;
 
 import junit.framework.AssertionFailedError;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.tracing.SetSpanReceiver;
+import org.apache.hadoop.tracing.SpanReceiverHost;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.htrace.SamplerBuilder;
+import org.apache.htrace.impl.AlwaysSampler;
 import org.junit.Test;
 
 public class TestFsShell {
@@ -39,4 +45,22 @@ public class TestFsShell {
     }
   }
 
+  @Test
+  public void testTracing() throws Throwable {
+    Configuration conf = new Configuration();
+    String prefix = FsShell.SEHLL_HTRACE_PREFIX;
+    conf.set(prefix + SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
+        SetSpanReceiver.class.getName());
+    conf.set(prefix + SamplerBuilder.SAMPLER_CONF_KEY,
+        AlwaysSampler.class.getName());
+    conf.setQuietMode(false);
+    FsShell shell = new FsShell(conf);
+    int res;
+    try {
+      res = ToolRunner.run(shell, new String[]{"-help"});
+    } finally {
+      shell.close();
+    }
+    SetSpanReceiver.assertSpanNamesFound(new String[]{"help"});
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b09573/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
index e242b74..97ca7c4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
@@ -86,7 +86,7 @@ public class SetSpanReceiver implements SpanReceiver {
     }
   }
 
-  static void assertSpanNamesFound(final String[] expectedSpanNames) {
+  public static void assertSpanNamesFound(final String[] expectedSpanNames) {
     try {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override


[02/14] hadoop git commit: HDFS-8180. AbstractFileSystem Implementation for WebHdfs. Contributed by Sathosh G Nayak.

Posted by aw...@apache.org.
HDFS-8180. AbstractFileSystem Implementation for WebHdfs. Contributed by Sathosh G Nayak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0712a810
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0712a810
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0712a810

Branch: refs/heads/HADOOP-12111
Commit: 0712a8103fec6e9a9ceba335e3c3800b85b2c7ca
Parents: 69b0957
Author: Jakob Homan <jg...@gmail.com>
Authored: Tue Jul 28 21:03:31 2015 -0700
Committer: Jakob Homan <jg...@gmail.com>
Committed: Tue Jul 28 21:03:31 2015 -0700

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml         |  12 ++
 .../fs/FileContextMainOperationsBaseTest.java   |   4 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../java/org/apache/hadoop/fs/SWebHdfs.java     |  51 ++++++
 .../main/java/org/apache/hadoop/fs/WebHdfs.java |  51 ++++++
 .../main/java/org/apache/hadoop/fs/package.html |  26 +++
 .../TestSWebHdfsFileContextMainOperations.java  | 110 +++++++++++++
 .../TestWebHdfsFileContextMainOperations.java   | 157 +++++++++++++++++++
 8 files changed, 411 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 13702ee..bfdd453 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -596,6 +596,18 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.AbstractFileSystem.webhdfs.impl</name>
+  <value>org.apache.hadoop.fs.WebHdfs</value>
+  <description>The FileSystem for webhdfs: uris.</description>
+</property>
+
+<property>
+  <name>fs.AbstractFileSystem.swebhdfs.impl</name>
+  <value>org.apache.hadoop.fs.SWebHdfs</value>
+  <description>The FileSystem for swebhdfs: uris.</description>
+</property>
+
+<property>
   <name>fs.ftp.host</name>
   <value>0.0.0.0</value>
   <description>FTP filesystem connects to this server</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index e872176..12ec375 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -1249,7 +1249,7 @@ public abstract class FileContextMainOperationsBaseTest  {
     byte[] bb = new byte[(int)len];
     FSDataInputStream fsdis = fc.open(path);
     try {
-      fsdis.read(bb);
+      fsdis.readFully(bb);
     } finally {
       fsdis.close();
     }
@@ -1310,7 +1310,7 @@ public abstract class FileContextMainOperationsBaseTest  {
     byte[] bb = new byte[data.length];
     FSDataInputStream fsdis = fc.open(path);
     try {
-      fsdis.read(bb);
+      fsdis.readFully(bb);
     } finally {
       fsdis.close();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9b2de81..ef12720 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -755,6 +755,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-7858. Improve HA Namenode Failover detection on the client. (asuresh)
 
+    HDFS-8180. AbstractFileSystem Implementation for WebHdfs. (snayak via jghoman)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
new file mode 100644
index 0000000..cd36393
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * AbstractFileSystem implementation for HDFS over the web (secure).
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class SWebHdfs extends DelegateToFileSystem {
+
+  public static final String SCHEME = "swebhdfs";
+
+  /**
+   * This constructor has the signature needed by
+   * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
+   *
+   * @param theUri which must be that of swebhdfs
+   * @param conf   configuration
+   * @throws IOException
+   */
+  SWebHdfs(URI theUri, Configuration conf)
+      throws IOException, URISyntaxException {
+    super(theUri, new SWebHdfsFileSystem(), conf, SCHEME, false);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
new file mode 100644
index 0000000..dc4f6d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * AbstractFileSystem implementation for HDFS over the web.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class WebHdfs extends DelegateToFileSystem {
+
+  public static final String SCHEME = "webhdfs";
+
+  /**
+   * This constructor has the signature needed by
+   * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
+   *
+   * @param theUri which must be that of webhdfs
+   * @param conf   configuration
+   * @throws IOException
+   */
+  WebHdfs(URI theUri, Configuration conf)
+      throws IOException, URISyntaxException {
+    super(theUri, new WebHdfsFileSystem(), conf, SCHEME, false);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/package.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/package.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/package.html
new file mode 100644
index 0000000..53b2a5a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/package.html
@@ -0,0 +1,26 @@
+<html>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<body>
+
+<p>Implementations of {@link org.apache.hadoop.fs.AbstractFileSystem} for hdfs
+    over rpc and hdfs over web.</p>
+
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
new file mode 100644
index 0000000..874abd6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import javax.security.auth.login.LoginException;
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize;
+import static org.apache.hadoop.fs.FileContextTestHelper.getFileData;
+
+/**
+ * Test of FileContext apis on SWebhdfs.
+ */
+public class TestSWebHdfsFileContextMainOperations
+    extends TestWebHdfsFileContextMainOperations {
+
+  private static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  private static String keystoresDir;
+  private static String sslConfDir;
+  protected static URI webhdfsUrl;
+
+  private static final HdfsConfiguration CONF = new HdfsConfiguration();
+
+  private static final String BASEDIR =
+      System.getProperty("test.build.dir", "target/test-dir") + "/"
+          + TestSWebHdfsFileContextMainOperations.class.getSimpleName();
+  protected static int numBlocks = 2;
+  protected static final byte[] data = getFileData(numBlocks,
+      getDefaultBlockSize());
+
+  private static Configuration sslConf;
+
+  @BeforeClass
+  public static void clusterSetupAtBeginning()
+      throws IOException, LoginException, URISyntaxException {
+
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    base.mkdirs();
+    keystoresDir = new File(BASEDIR).getAbsolutePath();
+    sslConf = new Configuration();
+
+    try {
+      sslConfDir = KeyStoreTestUtil
+          .getClasspathDir(TestSWebHdfsFileContextMainOperations.class);
+      KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, sslConf, false);
+    } catch (Exception ex) {
+      throw new RuntimeException(ex);
+    }
+    CONF.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
+    CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    CONF.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    CONF.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT_AND_LOCALHOST");
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+
+    cluster.waitClusterUp();
+    webhdfsUrl = new URI(SWebHdfs.SCHEME + "://" + cluster.getConfiguration(0)
+        .get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY));
+
+    fc = FileContext.getFileContext(webhdfsUrl, CONF);
+    defaultWorkingDirectory = fc.makeQualified(new Path(
+        "/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
+    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
+
+  }
+
+  @Override
+  public URI getWebhdfsUrl() {
+    return webhdfsUrl;
+  }
+
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+    FileUtil.fullyDelete(new File(BASEDIR));
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712a810/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
new file mode 100644
index 0000000..c4bf0ce
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.EnumSet;
+
+import static org.apache.hadoop.fs.CreateFlag.CREATE;
+import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize;
+import static org.apache.hadoop.fs.FileContextTestHelper.getFileData;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test of FileContext apis on Webhdfs.
+ */
+public class TestWebHdfsFileContextMainOperations
+    extends FileContextMainOperationsBaseTest {
+
+  protected static MiniDFSCluster cluster;
+  private static Path defaultWorkingDirectory;
+  protected static URI webhdfsUrl;
+
+  protected static int numBlocks = 2;
+
+  protected static final byte[] data = getFileData(numBlocks,
+      getDefaultBlockSize());
+  protected static final HdfsConfiguration CONF = new HdfsConfiguration();
+
+  @Override
+  public Path getDefaultWorkingDirectory() {
+    return defaultWorkingDirectory;
+  }
+
+  public URI getWebhdfsUrl() {
+    return webhdfsUrl;
+  }
+
+  @BeforeClass
+  public static void clusterSetupAtBeginning()
+      throws IOException, LoginException, URISyntaxException {
+
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+    cluster.waitClusterUp();
+    webhdfsUrl = new URI(WebHdfs.SCHEME + "://" + cluster.getConfiguration(0)
+        .get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
+    fc = FileContext.getFileContext(webhdfsUrl, CONF);
+    defaultWorkingDirectory = fc.makeQualified(new Path(
+        "/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
+    fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    URI webhdfsUrlReal = getWebhdfsUrl();
+    Path testBuildData = new Path(
+        webhdfsUrlReal + "/build/test/data/" + RandomStringUtils
+            .randomAlphanumeric(10));
+    Path rootPath = new Path(testBuildData, "root-uri");
+
+    localFsRootPath = rootPath.makeQualified(webhdfsUrlReal, null);
+    fc.mkdir(getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true);
+  }
+
+  private Path getTestRootPath(FileContext fc, String path) {
+    return fileContextTestHelper.getTestRootPath(fc, path);
+  }
+
+  @Override
+  protected boolean listCorruptedBlocksSupported() {
+    return false;
+  }
+
+  /**
+   * Test FileContext APIs when symlinks are not supported
+   * TODO: Open separate JIRA for full support of the Symlink in webhdfs
+   */
+  @Test
+  public void testUnsupportedSymlink() throws IOException {
+    /**
+     * WebHdfs client Partially supports the Symlink.
+     * creation of Symlink is supported, but the getLinkTargetPath() api is not supported currently,
+     * Implement the test case once the full support is available.
+     */
+  }
+
+  /**
+   * TODO: Open JIRA for the idiosyncrasies between hdfs and webhdfs
+   */
+  public void testSetVerifyChecksum() throws IOException {
+    final Path rootPath = getTestRootPath(fc, "test");
+    final Path path = new Path(rootPath, "zoo");
+
+    FSDataOutputStream out = fc
+        .create(path, EnumSet.of(CREATE), Options.CreateOpts.createParent());
+    try {
+      out.write(data, 0, data.length);
+    } finally {
+      out.close();
+    }
+
+    //In webhdfs scheme fc.setVerifyChecksum() can be called only after
+    // writing first few bytes but in case of the hdfs scheme we can call
+    // immediately after the creation call.
+    // instruct FS to verify checksum through the FileContext:
+    fc.setVerifyChecksum(true, path);
+
+    FileStatus fileStatus = fc.getFileStatus(path);
+    final long len = fileStatus.getLen();
+    assertTrue(len == data.length);
+    byte[] bb = new byte[(int) len];
+    FSDataInputStream fsdis = fc.open(path);
+    try {
+      fsdis.readFully(bb);
+    } finally {
+      fsdis.close();
+    }
+    assertArrayEquals(data, bb);
+  }
+
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+}
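
With the new fs.AbstractFileSystem.webhdfs.impl and fs.AbstractFileSystem.swebhdfs.impl bindings above, a FileContext can be obtained directly from a webhdfs: (or swebhdfs:) URI. A minimal sketch mirroring the setup in the new tests above; the NameNode HTTP address is a placeholder:

  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Path;

  public class WebHdfsFileContextExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Resolved to org.apache.hadoop.fs.WebHdfs via the new core-default.xml entry.
      FileContext fc = FileContext.getFileContext(
          URI.create("webhdfs://namenode.example.com:50070"), conf);
      fc.mkdir(new Path("/tmp/webhdfs-demo"), FileContext.DEFAULT_PERM, true);
      System.out.println(
          fc.getFileStatus(new Path("/tmp/webhdfs-demo")).isDirectory());
    }
  }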


[09/14] hadoop git commit: HADOOP-10945. 4-digit octal umask permissions throws a parse error. Contributed by Chang Li

Posted by aw...@apache.org.
HADOOP-10945. 4-digit octal umask permissions throws a parse error. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f0a3572
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f0a3572
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f0a3572

Branch: refs/heads/HADOOP-12111
Commit: 6f0a35724f0da80146dbae4b6f6c341e1d3101f5
Parents: 2b2bd92
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Jul 29 15:25:10 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Wed Jul 29 15:25:10 2015 +0000

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/fs/permission/UmaskParser.java       |  2 +-
 .../apache/hadoop/security/TestPermission.java  | 26 ++++++++++++++++++++
 3 files changed, 30 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f0a3572/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 84e7076..098194c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1023,6 +1023,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12175. FsShell must load SpanReceierHost to support tracing
     (Masatake Iwasaki via Colin P. McCabe)
 
+    HADOOP-10945. 4-digit octal umask permissions throws a parse error (Chang
+    Li via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f0a3572/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/UmaskParser.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/UmaskParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/UmaskParser.java
index 79956c5..0cf26c5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/UmaskParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/UmaskParser.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Unstable
 class UmaskParser extends PermissionParser {
   private static Pattern chmodOctalPattern =
-    Pattern.compile("^\\s*[+]?()([0-7]{3})\\s*$"); // no leading 1 for sticky bit
+    Pattern.compile("^\\s*[+]?(0*)([0-7]{3})\\s*$"); // no leading 1 for sticky bit
   private static Pattern umaskSymbolicPattern =    /* not allow X or t */
     Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwx]*)([,\\s]*)\\s*");
   final short umaskMode;
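
The fix is confined to chmodOctalPattern above: the old empty group () accepted only exactly three octal digits, so a conventional 4-digit value such as "0022" failed to parse, while the new (0*) group tolerates leading zeros and a leading sticky-bit digit is still rejected. A standalone check of the new pattern (illustration only, not part of the patch):

  import java.util.regex.Pattern;

  public class UmaskPatternCheck {
    public static void main(String[] args) {
      Pattern chmodOctalPattern =
          Pattern.compile("^\\s*[+]?(0*)([0-7]{3})\\s*$");
      System.out.println(chmodOctalPattern.matcher("022").matches());  // true, as before
      System.out.println(chmodOctalPattern.matcher("0022").matches()); // true after the fix
      System.out.println(chmodOctalPattern.matcher("1222").matches()); // false: leading 1 (sticky bit) rejected
    }
  }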

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f0a3572/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 036aaae..425c82e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.security;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Random;
@@ -100,6 +101,31 @@ public class TestPermission {
     conf = new Configuration();
     conf.set(FsPermission.UMASK_LABEL, "022");
     assertEquals(18, FsPermission.getUMask(conf).toShort());
+
+    // Test 5 - equivalent valid umask
+    conf = new Configuration();
+    conf.set(FsPermission.UMASK_LABEL, "0022");
+    assertEquals(18, FsPermission.getUMask(conf).toShort());
+
+    // Test 6 - invalid umask
+    conf = new Configuration();
+    conf.set(FsPermission.UMASK_LABEL, "1222");
+    try {
+      FsPermission.getUMask(conf);
+      fail("expect IllegalArgumentException happen");
+    } catch (IllegalArgumentException e) {
+     //pass, exception successfully trigger
+    }
+
+    // Test 7 - invalid umask
+    conf = new Configuration();
+    conf.set(FsPermission.UMASK_LABEL, "01222");
+    try {
+      FsPermission.getUMask(conf);
+      fail("expect IllegalArgumentException happen");
+    } catch (IllegalArgumentException e) {
+     //pass, exception successfully trigger
+    }
   }
 
   @Test


[05/14] hadoop git commit: HDFS-8834. TestReplication is not valid after HDFS-6482. (Contributed by Lei Xu)

Posted by aw...@apache.org.
HDFS-8834. TestReplication is not valid after HDFS-6482. (Contributed by Lei Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4f1b8b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4f1b8b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4f1b8b2

Branch: refs/heads/HADOOP-12111
Commit: f4f1b8b267703b8bebab06e17e69a4a4de611592
Parents: 975e138
Author: Lei Xu <le...@cloudera.com>
Authored: Tue Jul 28 16:55:51 2015 -0700
Committer: Lei Xu <le...@cloudera.com>
Committed: Tue Jul 28 23:39:26 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/TestReplication.java | 26 +++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4f1b8b2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index b702da0..2139df9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -25,6 +27,10 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -505,12 +511,28 @@ public class TestReplication {
         if (data_dir.listFiles().length == 0) {
           nonParticipatedNodeDirs.add(data_dir);
         } else {
+          assertNull("participatedNodeDirs has already been set.",
+              participatedNodeDirs);
           participatedNodeDirs = data_dir;
         }
       }
+      assertEquals(2, nonParticipatedNodeDirs.size());
 
       String blockFile = null;
-      File[] listFiles = participatedNodeDirs.listFiles();
+      final List<File> listFiles = new ArrayList<>();
+      Files.walkFileTree(participatedNodeDirs.toPath(),
+          new SimpleFileVisitor<java.nio.file.Path>() {
+            @Override
+            public FileVisitResult visitFile(
+                java.nio.file.Path file, BasicFileAttributes attrs)
+                throws IOException {
+              listFiles.add(file.toFile());
+              return FileVisitResult.CONTINUE;
+            }
+          }
+      );
+      assertFalse(listFiles.isEmpty());
+      int numReplicaCreated = 0;
       for (File file : listFiles) {
         if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
             && !file.getName().endsWith("meta")) {
@@ -519,10 +541,12 @@ public class TestReplication {
             file1.mkdirs();
             new File(file1, blockFile).createNewFile();
             new File(file1, blockFile + "_1000.meta").createNewFile();
+            numReplicaCreated++;
           }
           break;
         }
       }
+      assertEquals(2, numReplicaCreated);
 
       fs.setReplication(new Path("/test"), (short) 3);
       cluster.restartDataNodes(); // Lets detect all DNs about dummy copied
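
The substantive change: HDFS-6482 moved DataNode block files into nested, block-ID-based subdirectories, so the earlier flat participatedNodeDirs.listFiles() no longer saw any blk_* files and the dummy-replica setup silently did nothing. The patch walks the directory tree recursively instead (spelling out java.nio.file.Path to avoid the clash with org.apache.hadoop.fs.Path used elsewhere in the test). A self-contained sketch of the same traversal:

  import java.io.File;
  import java.io.IOException;
  import java.nio.file.FileVisitResult;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.nio.file.SimpleFileVisitor;
  import java.nio.file.attribute.BasicFileAttributes;
  import java.util.ArrayList;
  import java.util.List;

  public class BlockFileWalker {
    /** Recursively collects block files (blk_*, excluding .meta) under a data dir. */
    static List<File> findBlockFiles(File dataDir) throws IOException {
      final List<File> blockFiles = new ArrayList<>();
      Files.walkFileTree(dataDir.toPath(), new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
          String name = file.getFileName().toString();
          if (name.startsWith("blk_") && !name.endsWith("meta")) {
            blockFiles.add(file.toFile());
          }
          return FileVisitResult.CONTINUE;
        }
      });
      return blockFiles;
    }
  }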


[04/14] hadoop git commit: HDFS-8822. Add SSD storagepolicy tests in TestBlockStoragePolicy#testDefaultPolicies (Contributed by Vinayakumar B)

Posted by aw...@apache.org.
HDFS-8822. Add SSD storagepolicy tests in TestBlockStoragePolicy#testDefaultPolicies (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/975e138d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/975e138d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/975e138d

Branch: refs/heads/HADOOP-12111
Commit: 975e138df316f59e8bb0642e138d4b1170fb8184
Parents: 50887e5
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jul 29 11:50:48 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Jul 29 11:50:48 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hadoop/hdfs/TestBlockStoragePolicy.java     | 88 ++++++++++++++++----
 2 files changed, 75 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/975e138d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 654c40f..d30c258 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -760,6 +760,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8811. Move BlockStoragePolicy name's constants from
     HdfsServerConstants.java to HdfsConstants.java (vinayakumarb)
 
+    HDFS-8822. Add SSD storagepolicy tests in TestBlockStoragePolicy#
+    testDefaultPolicies (vinayakumarb)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/975e138d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 78e1e58..631d9f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -67,7 +67,11 @@ public class TestBlockStoragePolicy {
   static final EnumSet<StorageType> none = EnumSet.noneOf(StorageType.class);
   static final EnumSet<StorageType> archive = EnumSet.of(StorageType.ARCHIVE);
   static final EnumSet<StorageType> disk = EnumSet.of(StorageType.DISK);
-  static final EnumSet<StorageType> both = EnumSet.of(StorageType.DISK, StorageType.ARCHIVE);
+  static final EnumSet<StorageType> ssd = EnumSet.of(StorageType.SSD);
+  static final EnumSet<StorageType> disk_archive = EnumSet.of(StorageType.DISK,
+      StorageType.ARCHIVE);
+  static final EnumSet<StorageType> all = EnumSet.of(StorageType.SSD,
+      StorageType.DISK, StorageType.ARCHIVE);
 
   static final long FILE_LEN = 1024;
   static final short REPLICATION = 3;
@@ -153,8 +157,8 @@ public class TestBlockStoragePolicy {
         final List<StorageType> computed = cold.chooseStorageTypes(replication);
         assertStorageType(computed, replication, StorageType.ARCHIVE);
       }
-      assertCreationFallback(cold, null, null, null);
-      assertReplicationFallback(cold, null, null, null);
+      assertCreationFallback(cold, null, null, null, null, null);
+      assertReplicationFallback(cold, null, null, null, null);
     }
     
     { // check Warm policy
@@ -163,8 +167,10 @@ public class TestBlockStoragePolicy {
         final List<StorageType> computed = warm.chooseStorageTypes(replication);
         assertStorageType(computed, replication, StorageType.DISK, StorageType.ARCHIVE);
       }
-      assertCreationFallback(warm, StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
-      assertReplicationFallback(warm, StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
+      assertCreationFallback(warm, StorageType.DISK, StorageType.DISK,
+          StorageType.ARCHIVE, StorageType.DISK, null);
+      assertReplicationFallback(warm, StorageType.DISK, StorageType.DISK,
+          StorageType.ARCHIVE, StorageType.DISK);
     }
 
     { // check Hot policy
@@ -173,8 +179,50 @@ public class TestBlockStoragePolicy {
         final List<StorageType> computed = hot.chooseStorageTypes(replication);
         assertStorageType(computed, replication, StorageType.DISK);
       }
-      assertCreationFallback(hot, null, null, null);
-      assertReplicationFallback(hot, StorageType.ARCHIVE, null, StorageType.ARCHIVE);
+      assertCreationFallback(hot, null, null, null, null, null);
+      assertReplicationFallback(hot, StorageType.ARCHIVE, null,
+          StorageType.ARCHIVE, StorageType.ARCHIVE);
+    }
+
+    { // check ONE_SSD policy
+      final BlockStoragePolicy onessd = POLICY_SUITE.getPolicy(ONESSD);
+      for (short replication = 1; replication < 6; replication++) {
+        final List<StorageType> computed = onessd
+            .chooseStorageTypes(replication);
+        assertStorageType(computed, replication, StorageType.SSD,
+            StorageType.DISK);
+      }
+      assertCreationFallback(onessd, StorageType.SSD, StorageType.SSD,
+          StorageType.SSD, StorageType.DISK, StorageType.SSD);
+      assertReplicationFallback(onessd, StorageType.SSD, StorageType.SSD,
+          StorageType.SSD, StorageType.DISK);
+    }
+
+    { // check ALL_SSD policy
+      final BlockStoragePolicy allssd = POLICY_SUITE.getPolicy(ALLSSD);
+      for (short replication = 1; replication < 6; replication++) {
+        final List<StorageType> computed = allssd
+            .chooseStorageTypes(replication);
+        assertStorageType(computed, replication, StorageType.SSD);
+      }
+      assertCreationFallback(allssd, StorageType.DISK, StorageType.DISK, null,
+          StorageType.DISK, null);
+      assertReplicationFallback(allssd, StorageType.DISK, StorageType.DISK,
+          null, StorageType.DISK);
+    }
+
+    { // check LAZY_PERSIST policy
+      final BlockStoragePolicy lazyPersist = POLICY_SUITE
+          .getPolicy(LAZY_PERSIST);
+      for (short replication = 1; replication < 6; replication++) {
+        final List<StorageType> computed = lazyPersist
+            .chooseStorageTypes(replication);
+        assertStorageType(computed, replication, StorageType.DISK);
+      }
+      assertCreationFallback(lazyPersist, StorageType.DISK, StorageType.DISK,
+          null, StorageType.DISK, null);
+      assertReplicationFallback(lazyPersist, StorageType.DISK,
+          StorageType.DISK, null, StorageType.DISK);
     }
   }
 
@@ -199,20 +247,28 @@ public class TestBlockStoragePolicy {
     }
   }
 
-  static void assertCreationFallback(BlockStoragePolicy policy, StorageType noneExpected,
-      StorageType archiveExpected, StorageType diskExpected) {
+  static void assertCreationFallback(BlockStoragePolicy policy,
+      StorageType noneExpected, StorageType archiveExpected,
+      StorageType diskExpected, StorageType ssdExpected,
+      StorageType disk_archiveExpected) {
     Assert.assertEquals(noneExpected, policy.getCreationFallback(none));
     Assert.assertEquals(archiveExpected, policy.getCreationFallback(archive));
     Assert.assertEquals(diskExpected, policy.getCreationFallback(disk));
-    Assert.assertEquals(null, policy.getCreationFallback(both));
+    Assert.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
+    Assert.assertEquals(disk_archiveExpected,
+        policy.getCreationFallback(disk_archive));
+    Assert.assertEquals(null, policy.getCreationFallback(all));
   }
 
-  static void assertReplicationFallback(BlockStoragePolicy policy, StorageType noneExpected,
-      StorageType archiveExpected, StorageType diskExpected) {
+  static void assertReplicationFallback(BlockStoragePolicy policy,
+      StorageType noneExpected, StorageType archiveExpected,
+      StorageType diskExpected, StorageType ssdExpected) {
     Assert.assertEquals(noneExpected, policy.getReplicationFallback(none));
-    Assert.assertEquals(archiveExpected, policy.getReplicationFallback(archive));
+    Assert
+        .assertEquals(archiveExpected, policy.getReplicationFallback(archive));
     Assert.assertEquals(diskExpected, policy.getReplicationFallback(disk));
-    Assert.assertEquals(null, policy.getReplicationFallback(both));
+    Assert.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
+    Assert.assertEquals(null, policy.getReplicationFallback(all));
   }
 
   private static interface CheckChooseStorageTypes {
@@ -260,7 +316,7 @@ public class TestBlockStoragePolicy {
       public void checkChooseStorageTypes(BlockStoragePolicy p,
           short replication, List<StorageType> chosen, StorageType... expected) {
         final List<StorageType> types = p.chooseStorageTypes(replication,
-            chosen, both, true);
+            chosen, disk_archive, true);
         assertStorageTypes(types, expected);
       }
     };
@@ -272,7 +328,7 @@ public class TestBlockStoragePolicy {
       public void checkChooseStorageTypes(BlockStoragePolicy p,
           short replication, List<StorageType> chosen, StorageType... expected) {
         final List<StorageType> types = p.chooseStorageTypes(replication,
-            chosen, both, false);
+            chosen, disk_archive, false);
         assertStorageTypes(types, expected);
       }
     };
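
For reference, the EnumSet argument in these fallback assertions names the storage types that are currently unavailable, and the policy answers with the type to fall back to (or null if none applies). A small sketch probing the behaviour the new ONE_SSD assertions cover; the suite/policy lookup shown is an assumed usage, not taken from this patch:

  import java.util.EnumSet;

  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

  public class FallbackProbe {
    public static void main(String[] args) {
      BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
      BlockStoragePolicy oneSsd = suite.getPolicy("ONE_SSD");
      // If SSD is unavailable at creation time, ONE_SSD falls back to DISK.
      StorageType fallback =
          oneSsd.getCreationFallback(EnumSet.of(StorageType.SSD));
      System.out.println(fallback);  // DISK
    }
  }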


[07/14] hadoop git commit: HDFS-8670. Better to exclude decommissioned nodes for namenode NodeUsage JMX (Contributed by J.Andreina)

Posted by aw...@apache.org.
HDFS-8670. Better to exclude decommissioned nodes for namenode NodeUsage JMX (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6374ee0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6374ee0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6374ee0d

Branch: refs/heads/HADOOP-12111
Commit: 6374ee0db445e0a1c3462c19ddee345df740cfb3
Parents: 2a1d656
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jul 29 14:47:19 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Jul 29 14:47:19 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/namenode/FSNamesystem.java      |  6 ++
 .../apache/hadoop/hdfs/TestDecommission.java    | 98 ++++++++++++++++++++
 3 files changed, 107 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6374ee0d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9a0c6da..cf03d3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1093,6 +1093,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8785. TestDistributedFileSystem is failing in trunk. (Xiaoyu Yao)
 
+    HDFS-8670. Better to exclude decommissioned nodes for namenode NodeUsage JMX
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6374ee0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0b44431..a259070 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5999,6 +5999,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         new HashMap<String, Map<String,Object>>();
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
+    for (Iterator<DatanodeDescriptor> it = live.iterator(); it.hasNext();) {
+      DatanodeDescriptor node = it.next();
+      if (node.isDecommissionInProgress() || node.isDecommissioned()) {
+        it.remove();
+      }
+    }
 
     if (live.size() > 0) {
       float totalDfsUsed = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6374ee0d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 8f965ad..413a3cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -28,6 +28,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.ExecutionException;
 
@@ -56,6 +57,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -68,6 +70,7 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -1127,4 +1130,99 @@ public class TestDecommission {
     assertEquals("Unexpected number of pending nodes", pending,
         decomManager.getNumPendingNodes());
   }
+
+  /**
+   * Decommissioned node should not be considered while calculating node usage
+   * @throws InterruptedException
+   */
+  @Test
+  public void testNodeUsageAfterDecommissioned()
+      throws IOException, InterruptedException {
+    nodeUsageVerification(2, new long[] { 26384L, 26384L },
+        AdminStates.DECOMMISSIONED);
+  }
+
+  /**
+   * DECOMMISSION_INPROGRESS node should not be considered
+   * while calculating node usage
+   * @throws InterruptedException
+   */
+  @Test
+  public void testNodeUsageWhileDecommissioining()
+      throws IOException, InterruptedException {
+    nodeUsageVerification(1, new long[] { 26384L },
+        AdminStates.DECOMMISSION_INPROGRESS);
+  }
+
+  @SuppressWarnings({ "unchecked" })
+  public void nodeUsageVerification(int numDatanodes, long[] nodesCapacity,
+      AdminStates decommissionState) throws IOException, InterruptedException {
+    Map<String, Map<String, String>> usage = null;
+    DatanodeInfo decommissionedNodeInfo = null;
+    String zeroNodeUsage = "0.00%";
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    FileSystem fileSys = null;
+    Path file1 = new Path("testNodeUsage.dat");
+    try {
+      SimulatedFSDataset.setFactory(conf);
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
+              .numDataNodes(numDatanodes)
+              .simulatedCapacities(nodesCapacity).build();
+      cluster.waitActive();
+      DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
+      validateCluster(client, numDatanodes);
+
+      ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+          new ArrayList<ArrayList<DatanodeInfo>>(1);
+      namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
+
+      if (decommissionState == AdminStates.DECOMMISSIONED) {
+        // Move datanode1 to Decommissioned state
+        ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
+        decommissionedNodeInfo = decommissionNode(0, null,
+            decommissionedNode, decommissionState);
+      }
+      // Write a file(replica 1).Hence will be written to only one live node.
+      fileSys = cluster.getFileSystem(0);
+      FSNamesystem ns = cluster.getNamesystem(0);
+      writeFile(fileSys, file1, 1);
+      Thread.sleep(2000);
+
+      // min NodeUsage should not be 0.00%
+      usage = (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
+      String minUsageBeforeDecom = usage.get("nodeUsage").get("min");
+      assertTrue(!minUsageBeforeDecom.equalsIgnoreCase(zeroNodeUsage));
+
+      if (decommissionState == AdminStates.DECOMMISSION_INPROGRESS) {
+        // Start decommissioning datanode
+        ArrayList<DatanodeInfo> decommissioningNodes = namenodeDecomList.
+            get(0);
+        decommissionedNodeInfo = decommissionNode(0, null,
+            decommissioningNodes, decommissionState);
+        // NodeUsage should not include DECOMMISSION_INPROGRESS node
+        // (minUsage should be 0.00%)
+        usage = (Map<String, Map<String, String>>)
+            JSON.parse(ns.getNodeUsage());
+        assertTrue(usage.get("nodeUsage").get("min").
+            equalsIgnoreCase(zeroNodeUsage));
+      }
+      // Recommission node
+      recommissionNode(0, decommissionedNodeInfo);
+
+      usage = (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
+      String nodeusageAfterRecommi =
+          decommissionState == AdminStates.DECOMMISSION_INPROGRESS
+              ? minUsageBeforeDecom
+              : zeroNodeUsage;
+      assertTrue(usage.get("nodeUsage").get("min").
+          equalsIgnoreCase(nodeusageAfterRecommi));
+    } finally {
+      cleanupFile(fileSys, file1);
+      cluster.shutdown();
+    }
+  }
 }
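
For context on the assertions above: FSNamesystem#getNodeUsage() returns a JSON
string summarising datanode usage, and the test reads the "min" entry under the
"nodeUsage" key. A minimal sketch of that lookup, not part of the patch and
assuming the org.mortbay.util.ajax.JSON helper used elsewhere in the HDFS tests
(the class and method names are illustrative):

    import java.util.Map;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.mortbay.util.ajax.JSON;

    final class NodeUsageSketch {
      // Parse the node-usage JSON exposed by the NameNode and return the
      // minimum per-node usage, e.g. "0.00%".
      @SuppressWarnings("unchecked")
      static String minNodeUsage(FSNamesystem ns) {
        Map<String, Map<String, String>> usage =
            (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
        return usage.get("nodeUsage").get("min");
      }
    }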


[10/14] hadoop git commit: YARN-2768. Avoid cloning Resource in FSAppAttempt#updateDemand. (Hong Zhiguo via kasha)

Posted by aw...@apache.org.
YARN-2768. Avoid cloning Resource in FSAppAttempt#updateDemand. (Hong Zhiguo via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5205a330
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5205a330
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5205a330

Branch: refs/heads/HADOOP-12111
Commit: 5205a330b387d2e133ee790b9fe7d5af3cd8bccc
Parents: 6f0a357
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Jul 29 09:42:32 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Wed Jul 29 09:42:32 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                       |  3 +++
 .../apache/hadoop/yarn/util/resource/Resources.java   | 14 +++++++++++++-
 .../resourcemanager/scheduler/fair/FSAppAttempt.java  |  4 ++--
 3 files changed, 18 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5205a330/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index be6a50c..910b85c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -378,6 +378,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3259. FairScheduler: Trigger fairShare updates on node events. 
     (Anubhav Dhoot via kasha)
 
+    YARN-2768. Avoid cloning Resource in FSAppAttempt#updateDemand.
+    (Hong Zhiguo via kasha)
+
   BUG FIXES
 
     YARN-3197. Confusing log generated by CapacityScheduler. (Varun Saxena 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5205a330/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 472811a..503d456 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -150,7 +150,19 @@ public class Resources {
   public static Resource multiply(Resource lhs, double by) {
     return multiplyTo(clone(lhs), by);
   }
-  
+
+  /**
+   * Multiply {@code rhs} by {@code by}, and add the result to {@code lhs}
+   * without creating any new {@link Resource} object.
+   */
+  public static Resource multiplyAndAddTo(
+      Resource lhs, Resource rhs, double by) {
+    lhs.setMemory(lhs.getMemory() + (int)(rhs.getMemory() * by));
+    lhs.setVirtualCores(lhs.getVirtualCores()
+        + (int)(rhs.getVirtualCores() * by));
+    return lhs;
+  }
+
   public static Resource multiplyAndNormalizeUp(
       ResourceCalculator calculator,Resource lhs, double by, Resource factor) {
     return calculator.multiplyAndNormalizeUp(lhs, by, factor);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5205a330/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 7419446..cfec915 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -801,8 +801,8 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     synchronized (this) {
       for (Priority p : getPriorities()) {
         for (ResourceRequest r : getResourceRequests(p).values()) {
-          Resource total = Resources.multiply(r.getCapability(), r.getNumContainers());
-          Resources.addTo(demand, total);
+          Resources.multiplyAndAddTo(demand,
+              r.getCapability(), r.getNumContainers());
         }
       }
     }
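
For context: the new Resources#multiplyAndAddTo folds each request's capability
directly into the running demand, where the old code cloned a Resource for every
ResourceRequest. A minimal sketch of the two forms side by side, not part of the
patch (the wrapper class and method name are illustrative):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.util.resource.Resources;

    final class DemandSketch {
      // Both forms add capability * numContainers into demand; the first
      // allocates a temporary Resource per request, the second mutates
      // demand in place.
      static void accumulate(Resource demand, ResourceRequest r) {
        // before YARN-2768:
        //   Resource total = Resources.multiply(r.getCapability(), r.getNumContainers());
        //   Resources.addTo(demand, total);
        // after YARN-2768:
        Resources.multiplyAndAddTo(demand, r.getCapability(), r.getNumContainers());
      }
    }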


[06/14] hadoop git commit: Revert "HDFS-6682. Add a metric to expose the timestamp of the oldest under-replicated block. (aajisaka)"

Posted by aw...@apache.org.
Revert "HDFS-6682. Add a metric to expose the timestamp of the oldest under-replicated block. (aajisaka)"

This reverts commit 02c01815eca656814febcdaca6115e5f53b9c746.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a1d6561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a1d6561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a1d6561

Branch: refs/heads/HADOOP-12111
Commit: 2a1d656196cf9750fa482cb10893684e8a2ce7c3
Parents: f4f1b8b
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Jul 29 16:48:30 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Jul 29 16:48:30 2015 +0900

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 --
 .../server/blockmanagement/BlockManager.java    |  4 --
 .../blockmanagement/UnderReplicatedBlocks.java  | 33 ++------------
 .../hdfs/server/namenode/FSNamesystem.java      |  9 +---
 .../TestUnderReplicatedBlocks.java              | 48 --------------------
 6 files changed, 5 insertions(+), 93 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a1d6561/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 2e6c095..646cda5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -201,7 +201,6 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | Name | Description |
 |:---- |:---- |
 | `MissingBlocks` | Current number of missing blocks |
-| `TimeOfTheOldestBlockToBeReplicated` | The timestamp of the oldest block to be replicated. If there are no under-replicated or corrupt blocks, return 0. |
 | `ExpiredHeartbeats` | Total number of expired heartbeats |
 | `TransactionsSinceLastCheckpoint` | Total number of transactions since last checkpoint |
 | `TransactionsSinceLastLogRoll` | Total number of transactions since last edit log roll |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a1d6561/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d30c258..9a0c6da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -747,9 +747,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8730. Clean up the import statements in ClientProtocol.
     (Takanobu Asanuma via wheat9)
 
-    HDFS-6682. Add a metric to expose the timestamp of the oldest
-    under-replicated block. (aajisaka)
-
     HDFS-8735. Inotify: All events classes should implement toString() API.
     (Surendra Singh Lilhore via aajisaka)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a1d6561/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 64603d0..7dce2a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -171,10 +171,6 @@ public class BlockManager implements BlockStatsMXBean {
   public int getPendingDataNodeMessageCount() {
     return pendingDNMessages.count();
   }
-  /** Used by metrics. */
-  public long getTimeOfTheOldestBlockToBeReplicated() {
-    return neededReplications.getTimeOfTheOldestBlockToBeReplicated();
-  }
 
   /**replicationRecheckInterval is how often namenode checks for new replication work*/
   private final long replicationRecheckInterval;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a1d6561/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
index 128aae6..ebc15b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
@@ -18,15 +18,10 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Iterator;
-import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.util.Time;
 
 /**
  * Keep prioritized queues of under replicated blocks.
@@ -87,9 +82,6 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
 
   /** The number of corrupt blocks with replication factor 1 */
   private int corruptReplOneBlocks = 0;
-  /** Keep timestamp when a block is put into the queue. */
-  private final Map<BlockInfo, Long> timestampsMap =
-      Collections.synchronizedMap(new LinkedHashMap<BlockInfo, Long>());
 
   /** Create an object. */
   UnderReplicatedBlocks() {
@@ -99,14 +91,13 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
   }
 
   /**
-   * Empty the queues and timestamps.
+   * Empty the queues.
    */
   synchronized void clear() {
     for (int i = 0; i < LEVEL; i++) {
       priorityQueues.get(i).clear();
     }
     corruptReplOneBlocks = 0;
-    timestampsMap.clear();
   }
 
   /** Return the total number of under replication blocks */
@@ -129,20 +120,6 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
     return size;
   }
 
-  /**
-   * Return the smallest timestamp of the under-replicated/corrupt blocks.
-   * If there are no under-replicated or corrupt blocks, return 0.
-   */
-  long getTimeOfTheOldestBlockToBeReplicated() {
-    synchronized (timestampsMap) {
-      if (timestampsMap.isEmpty()) {
-        return 0;
-      }
-      // Since we are using LinkedHashMap, the first value is the smallest.
-      return timestampsMap.entrySet().iterator().next().getValue();
-    }
-  }
-
   /** Return the number of corrupt blocks */
   synchronized int getCorruptBlockSize() {
     return priorityQueues.get(QUEUE_WITH_CORRUPT_BLOCKS).size();
@@ -221,7 +198,7 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
               + " has only {} replicas and need {} replicas so is added to" +
               " neededReplications at priority level {}", block, curReplicas,
           expectedReplicas, priLevel);
-      timestampsMap.put(block, Time.now());
+
       return true;
     }
     return false;
@@ -266,9 +243,8 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
     if(priLevel >= 0 && priLevel < LEVEL
         && priorityQueues.get(priLevel).remove(block)) {
       NameNode.blockStateChangeLog.debug(
-          "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block {}" +
-              " from priority queue {}", block, priLevel);
-      timestampsMap.remove(block);
+        "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block {}" +
+            " from priority queue {}", block, priLevel);
       return true;
     } else {
       // Try to remove the block from all queues if the block was
@@ -278,7 +254,6 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
           NameNode.blockStateChangeLog.debug(
               "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block" +
                   " {} from priority queue {}", block, priLevel);
-          timestampsMap.remove(block);
           return true;
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a1d6561/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0a2422e..0b44431 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3770,14 +3770,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     // not locking
     return blockManager.getMissingReplOneBlocksCount();
   }
-
-  @Metric({"TimeOfTheOldestBlockToBeReplicated",
-      "The timestamp of the oldest block to be replicated. If there are no" +
-      "under-replicated or corrupt blocks, return 0."})
-  public long getTimeOfTheOldestBlockToBeReplicated() {
-    return blockManager.getTimeOfTheOldestBlockToBeReplicated();
-  }
-
+  
   @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
   public int getExpiredHeartbeats() {
     return datanodeStatistics.getExpiredHeartbeats();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a1d6561/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
index 7615cee..27b35f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
@@ -28,10 +28,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
 import java.util.Iterator;
@@ -148,50 +146,4 @@ public class TestUnderReplicatedBlocks {
 
   }
 
-  @Test
-  public void testGetTimeOfTheOldestBlockToBeReplicated() {
-    UnderReplicatedBlocks blocks = new UnderReplicatedBlocks();
-    BlockInfo block1 = new BlockInfoContiguous(new Block(1), (short) 1);
-    BlockInfo block2 = new BlockInfoContiguous(new Block(2), (short) 1);
-
-    // if there are no under-replicated or corrupt blocks, return 0
-    assertEquals(blocks.getTimeOfTheOldestBlockToBeReplicated(), 0L);
-
-    // add block1, add block2, remove block1, remove block2
-    long time1 = Time.now();
-    blocks.add(block1, 1, 0, 3);
-    long time2 = Time.now();
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.add(block2, 2, 0, 3);
-    long time3 = Time.now();
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.remove(block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time2);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time3);
-
-    blocks.remove(block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
-    assertEquals(blocks.getTimeOfTheOldestBlockToBeReplicated(), 0L);
-
-    // add block2, add block1, remove block1, remove block2
-    time1 = Time.now();
-    blocks.add(block2, 2, 0, 3);
-    time2 = Time.now();
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.add(block1, 1, 0, 3);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.remove(block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() >= time1);
-    assertTrue(blocks.getTimeOfTheOldestBlockToBeReplicated() <= time2);
-
-    blocks.remove(block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
-    assertEquals(blocks.getTimeOfTheOldestBlockToBeReplicated(), 0L);
-  }
 }


[08/14] hadoop git commit: YARN-3950. Add unique SHELL_ID environment variable to DistributedShell. Contributed by Robert Kanter

Posted by aw...@apache.org.
YARN-3950. Add unique SHELL_ID environment variable to DistributedShell. Contributed by Robert Kanter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b2bd921
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b2bd921
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b2bd921

Branch: refs/heads/HADOOP-12111
Commit: 2b2bd9214604bc2e14e41e08d30bf86f512151bd
Parents: 6374ee0
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Jul 29 15:16:40 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Wed Jul 29 15:16:40 2015 +0000

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../distributedshell/ApplicationMaster.java     | 31 +++++++++++++++-----
 .../distributedshell/TestDSAppMaster.java       | 11 ++++++-
 3 files changed, 36 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2bd921/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b4666e8..be6a50c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -358,6 +358,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3026. Move application-specific container allocation logic from
     LeafQueue to FiCaSchedulerApp. (Wangda Tan via jianhe)
 
+    YARN-3950. Add unique SHELL_ID environment variable to DistributedShell
+    (Robert Kanter via jlowe)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2bd921/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b28c0c9..5d2d6c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -182,6 +182,8 @@ public class ApplicationMaster {
     DS_APP_ATTEMPT, DS_CONTAINER
   }
 
+  private static final String YARN_SHELL_ID = "YARN_SHELL_ID";
+
   // Configuration
   private Configuration conf;
 
@@ -279,6 +281,8 @@ public class ApplicationMaster {
   private final String linux_bash_command = "bash";
   private final String windows_command = "cmd /c";
 
+  private int yarnShellIdCounter = 1;
+
   @VisibleForTesting
   protected final Set<ContainerId> launchedContainers =
       Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>());
@@ -803,8 +807,11 @@ public class ApplicationMaster {
           + allocatedContainers.size());
       numAllocatedContainers.addAndGet(allocatedContainers.size());
       for (Container allocatedContainer : allocatedContainers) {
+        String yarnShellId = Integer.toString(yarnShellIdCounter);
+        yarnShellIdCounter++;
         LOG.info("Launching shell command on a new container."
             + ", containerId=" + allocatedContainer.getId()
+            + ", yarnShellId=" + yarnShellId
             + ", containerNode=" + allocatedContainer.getNodeId().getHost()
             + ":" + allocatedContainer.getNodeId().getPort()
             + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress()
@@ -815,7 +822,8 @@ public class ApplicationMaster {
         // + ", containerToken"
         // +allocatedContainer.getContainerToken().getIdentifier().toString());
 
-        Thread launchThread = createLaunchContainerThread(allocatedContainer);
+        Thread launchThread = createLaunchContainerThread(allocatedContainer,
+            yarnShellId);
 
         // launch and start the container on a separate thread to keep
         // the main thread unblocked
@@ -927,7 +935,8 @@ public class ApplicationMaster {
   private class LaunchContainerRunnable implements Runnable {
 
     // Allocated container
-    Container container;
+    private Container container;
+    private String shellId;
 
     NMCallbackHandler containerListener;
 
@@ -935,10 +944,11 @@ public class ApplicationMaster {
      * @param lcontainer Allocated container
      * @param containerListener Callback handler of the container
      */
-    public LaunchContainerRunnable(
-        Container lcontainer, NMCallbackHandler containerListener) {
+    public LaunchContainerRunnable(Container lcontainer,
+        NMCallbackHandler containerListener, String shellId) {
       this.container = lcontainer;
       this.containerListener = containerListener;
+      this.shellId = shellId;
     }
 
     @Override
@@ -949,7 +959,7 @@ public class ApplicationMaster {
      */
     public void run() {
       LOG.info("Setting up container launch container for containerid="
-          + container.getId());
+          + container.getId() + " with shellid=" + shellId);
 
       // Set the local resources
       Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
@@ -1038,8 +1048,11 @@ public class ApplicationMaster {
       // download anyfiles in the distributed file-system. The tokens are
       // otherwise also useful in cases, for e.g., when one is running a
       // "hadoop dfs" command inside the distributed shell.
+      Map<String, String> myShellEnv = new HashMap<String, String>(shellEnv);
+      myShellEnv.put(YARN_SHELL_ID, shellId);
       ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
-        localResources, shellEnv, commands, null, allTokens.duplicate(), null);
+        localResources, myShellEnv, commands, null, allTokens.duplicate(),
+          null);
       containerListener.addContainer(container.getId(), container);
       nmClientAsync.startContainerAsync(container, ctx);
     }
@@ -1189,9 +1202,11 @@ public class ApplicationMaster {
   }
 
   @VisibleForTesting
-  Thread createLaunchContainerThread(Container allocatedContainer) {
+  Thread createLaunchContainerThread(Container allocatedContainer,
+      String shellId) {
     LaunchContainerRunnable runnableLaunchContainer =
-        new LaunchContainerRunnable(allocatedContainer, containerListener);
+        new LaunchContainerRunnable(allocatedContainer, containerListener,
+            shellId);
     return new Thread(runnableLaunchContainer);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2bd921/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
index 0fed14d..2789d04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
@@ -41,6 +41,7 @@ import org.mockito.Mockito;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 /**
@@ -51,11 +52,14 @@ public class TestDSAppMaster {
 
   static class TestAppMaster extends ApplicationMaster {
     private int threadsLaunched = 0;
+    public List<String> yarnShellIds = new ArrayList<String>();
 
     @Override
-    protected Thread createLaunchContainerThread(Container allocatedContainer) {
+    protected Thread createLaunchContainerThread(Container allocatedContainer,
+        String shellId) {
       threadsLaunched++;
       launchedContainers.add(allocatedContainer.getId());
+      yarnShellIds.add(shellId);
       return new Thread();
     }
 
@@ -101,6 +105,8 @@ public class TestDSAppMaster {
     Mockito.verifyZeroInteractions(mockClient);
     Assert.assertEquals("Incorrect number of threads launched", 1,
         master.threadsLaunched);
+    Assert.assertEquals("Incorrect YARN Shell IDs",
+        Arrays.asList("1"), master.yarnShellIds);
 
     // now send 3 extra containers
     containers.clear();
@@ -117,6 +123,9 @@ public class TestDSAppMaster {
     Assert.assertEquals("Incorrect number of threads launched", 4,
         master.threadsLaunched);
 
+    Assert.assertEquals("Incorrect YARN Shell IDs",
+        Arrays.asList("1", "2", "3", "4"), master.yarnShellIds);
+
     // make sure we handle completion events correctly
     List<ContainerStatus> status = new ArrayList<>();
     status.add(generateContainerStatus(id1, ContainerExitStatus.SUCCESS));
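
For context: each launched container now receives a YARN_SHELL_ID entry in its
environment, with the counter starting at 1 and incrementing per allocated
container. A minimal sketch of how a launched process could read it, not part
of the patch and purely illustrative:

    final class ShellIdSketch {
      public static void main(String[] args) {
        // YARN_SHELL_ID is injected by the DistributedShell AM into the
        // container launch context; it is unset when run outside YARN.
        String shellId = System.getenv("YARN_SHELL_ID");
        System.out.println("shell id = " + (shellId != null ? shellId : "<not set>"));
      }
    }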


[11/14] hadoop git commit: YARN-3919. NPEs' while stopping service after exception during CommonNodeLabelsManager#start. (varun saxena via rohithsharmaks)

Posted by aw...@apache.org.
YARN-3919. NPEs' while stopping service after exception during CommonNodeLabelsManager#start. (varun saxena via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c020b62c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c020b62c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c020b62c

Branch: refs/heads/HADOOP-12111
Commit: c020b62cf8de1f3baadc9d2f3410640ef7880543
Parents: 5205a33
Author: rohithsharmaks <ro...@apache.org>
Authored: Wed Jul 29 22:20:32 2015 +0530
Committer: rohithsharmaks <ro...@apache.org>
Committed: Wed Jul 29 23:30:47 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                              | 3 +++
 .../java/org/apache/hadoop/yarn/event/AsyncDispatcher.java   | 3 ++-
 .../hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java    | 8 ++------
 3 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c020b62c/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 910b85c..8e8a76b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -707,6 +707,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3982. container-executor parsing of container-executor.cfg broken in
     trunk and branch-2. (Varun Vasudev via xgong)
 
+    YARN-3919. NPEs' while stopping service after exception during
+    CommonNodeLabelsManager#start. (varun saxena via rohithsharmaks)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c020b62c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 48312a3..f670112 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -139,7 +139,8 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
       blockNewEvents = true;
       LOG.info("AsyncDispatcher is draining to stop, igonring any new events.");
       synchronized (waitForDrained) {
-        while (!drained && eventHandlingThread.isAlive()) {
+        while (!drained && eventHandlingThread != null
+            && eventHandlingThread.isAlive()) {
           waitForDrained.wait(1000);
           LOG.info("Waiting for AsyncDispatcher to drain. Thread state is :" +
               eventHandlingThread.getState());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c020b62c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
index abf07e8..20dc67c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
@@ -92,12 +93,7 @@ public class FileSystemNodeLabelsStore extends NodeLabelsStore {
 
   @Override
   public void close() throws IOException {
-    try {
-      fs.close();
-      editlogOs.close();
-    } catch (IOException e) {
-      LOG.warn("Exception happened whiling shutting down,", e);
-    }
+    IOUtils.cleanup(LOG, fs, editlogOs);
   }
 
   private void setFileSystem(Configuration conf) throws IOException {
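
For context: the close() change leans on IOUtils.cleanup being tolerant of null
arguments and of IOExceptions from individual close() calls, so a store whose
start() failed before fs or editlogOs was initialised no longer throws on
service stop. A minimal sketch of that behaviour, not part of the patch (the
class and method names are illustrative):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.io.IOUtils;

    final class CleanupSketch {
      private static final Log LOG = LogFactory.getLog(CleanupSketch.class);

      // IOUtils.cleanup skips null arguments and logs (rather than rethrows)
      // any IOException raised by an individual close().
      static void closeQuietly(FileSystem fs, FSDataOutputStream editlogOs) {
        IOUtils.cleanup(LOG, fs, editlogOs);
      }
    }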


[03/14] hadoop git commit: HDFS-8811. Move BlockStoragePolicy name's constants from HdfsServerConstants.java to HdfsConstants.java (Contributed by Vinayakumar B)

Posted by aw...@apache.org.
HDFS-8811. Move BlockStoragePolicy name's constants from HdfsServerConstants.java to HdfsConstants.java (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50887e5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50887e5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50887e5b

Branch: refs/heads/HADOOP-12111
Commit: 50887e5b07b6abb20c0edd74211e5612dc7b16da
Parents: 0712a81
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jul 29 11:45:33 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Jul 29 11:45:33 2015 +0530

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/HdfsConstants.java     |  3 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../BlockStoragePolicySuite.java                |  6 ++--
 .../hdfs/server/common/HdfsServerConstants.java |  3 --
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  2 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java     | 30 ++++++++++----------
 .../hdfs/server/mover/TestStorageMover.java     |  8 +++---
 7 files changed, 29 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50887e5b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 58c7ea1..443576d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -37,6 +37,9 @@ public final class HdfsConstants {
   public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
   public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
+  public static final String HOT_STORAGE_POLICY_NAME = "HOT";
+  public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String COLD_STORAGE_POLICY_NAME = "COLD";
   // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50887e5b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ef12720..654c40f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -757,6 +757,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8180. AbstractFileSystem Implementation for WebHdfs. (snayak via jghoman)
 
+    HDFS-8811. Move BlockStoragePolicy name's constants from
+    HdfsServerConstants.java to HdfsConstants.java (vinayakumarb)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50887e5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 2a71c29..7e6c2b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -69,18 +69,18 @@ public class BlockStoragePolicySuite {
         new StorageType[]{StorageType.SSD, StorageType.DISK});
     final byte hotId = HdfsServerConstants.HOT_STORAGE_POLICY_ID;
     policies[hotId] = new BlockStoragePolicy(hotId,
-        HdfsServerConstants.HOT_STORAGE_POLICY_NAME,
+        HdfsConstants.HOT_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
         new StorageType[]{StorageType.ARCHIVE});
     final byte warmId = HdfsServerConstants.WARM_STORAGE_POLICY_ID;
     policies[warmId] = new BlockStoragePolicy(warmId,
-        HdfsServerConstants.WARM_STORAGE_POLICY_NAME,
+        HdfsConstants.WARM_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
     final byte coldId = HdfsServerConstants.COLD_STORAGE_POLICY_ID;
     policies[coldId] = new BlockStoragePolicy(coldId,
-        HdfsServerConstants.COLD_STORAGE_POLICY_NAME,
+        HdfsConstants.COLD_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
         StorageType.EMPTY_ARRAY);
     return new BlockStoragePolicySuite(hotId, policies);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50887e5b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 26a7ab3..11194dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -99,9 +99,6 @@ public interface HdfsServerConstants {
   };
   byte[] DOT_SNAPSHOT_DIR_BYTES
               = DFSUtil.string2Bytes(HdfsConstants.DOT_SNAPSHOT_DIR);
-  String HOT_STORAGE_POLICY_NAME = "HOT";
-  String WARM_STORAGE_POLICY_NAME = "WARM";
-  String COLD_STORAGE_POLICY_NAME = "COLD";
   byte MEMORY_STORAGE_POLICY_ID = 15;
   byte ALLSSD_STORAGE_POLICY_ID = 12;
   byte ONESSD_STORAGE_POLICY_ID = 10;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50887e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 89ee674..5e1c597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1253,7 +1253,7 @@ public class DFSTestUtil {
     s2.close();
     // OP_SET_STORAGE_POLICY 45
     filesystem.setStoragePolicy(pathFileCreate,
-        HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
+        HdfsConstants.HOT_STORAGE_POLICY_NAME);
     // OP_RENAME_OLD 1
     final Path pathFileMoved = new Path("/file_moved");
     filesystem.rename(pathFileCreate, pathFileMoved);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50887e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index afd2597..78e1e58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -88,7 +88,7 @@ public class TestBlockStoragePolicy {
     try {
       cluster.waitActive();
       cluster.getFileSystem().setStoragePolicy(new Path("/"),
-          HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
+          HdfsConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -108,7 +108,7 @@ public class TestBlockStoragePolicy {
     try {
       cluster.waitActive();
       cluster.getFileSystem().setStoragePolicy(new Path("/"),
-          HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
+          HdfsConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -865,7 +865,7 @@ public class TestBlockStoragePolicy {
 
       final Path invalidPath = new Path("/invalidPath");
       try {
-        fs.setStoragePolicy(invalidPath, HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
+        fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
         Assert.fail("Should throw a FileNotFoundException");
       } catch (FileNotFoundException e) {
         GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
@@ -878,17 +878,17 @@ public class TestBlockStoragePolicy {
         GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
       }
 
-      fs.setStoragePolicy(fooFile, HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
-      fs.setStoragePolicy(barDir, HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
-      fs.setStoragePolicy(barFile2, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
       Assert.assertEquals("File storage policy should be COLD",
-          HdfsServerConstants.COLD_STORAGE_POLICY_NAME,
+          HdfsConstants.COLD_STORAGE_POLICY_NAME,
           fs.getStoragePolicy(fooFile).getName());
       Assert.assertEquals("File storage policy should be WARM",
-          HdfsServerConstants.WARM_STORAGE_POLICY_NAME,
+          HdfsConstants.WARM_STORAGE_POLICY_NAME,
           fs.getStoragePolicy(barDir).getName());
       Assert.assertEquals("File storage policy should be HOT",
-          HdfsServerConstants.HOT_STORAGE_POLICY_NAME,
+          HdfsConstants.HOT_STORAGE_POLICY_NAME,
           fs.getStoragePolicy(barFile2).getName());
 
       dirList = fs.getClient().listPaths(dir.toString(),
@@ -937,7 +937,7 @@ public class TestBlockStoragePolicy {
       DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
       DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);
 
-      fs.setStoragePolicy(fooDir, HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
 
       HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
           HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -949,7 +949,7 @@ public class TestBlockStoragePolicy {
       // take snapshot
       SnapshotTestHelper.createSnapshot(fs, dir, "s1");
       // change the storage policy of fooFile1
-      fs.setStoragePolicy(fooFile1, HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);
 
       fooList = fs.getClient().listPaths(fooDir.toString(),
           HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -972,7 +972,7 @@ public class TestBlockStoragePolicy {
           HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);
 
       // change the storage policy of foo dir
-      fs.setStoragePolicy(fooDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
       // /dir/foo is now hot
       dirList = fs.getClient().listPaths(dir.toString(),
           HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -1089,7 +1089,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeHotFileRep() throws Exception {
-    testChangeFileRep(HdfsServerConstants.HOT_STORAGE_POLICY_NAME, HOT,
+    testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT,
         new StorageType[]{StorageType.DISK, StorageType.DISK,
             StorageType.DISK},
         new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
@@ -1103,7 +1103,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeWarmRep() throws Exception {
-    testChangeFileRep(HdfsServerConstants.WARM_STORAGE_POLICY_NAME, WARM,
+    testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
             StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
@@ -1116,7 +1116,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeColdRep() throws Exception {
-    testChangeFileRep(HdfsServerConstants.COLD_STORAGE_POLICY_NAME, COLD,
+    testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
         new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
             StorageType.ARCHIVE},
         new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50887e5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 3095f30..4516080 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -53,7 +54,6 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -99,9 +99,9 @@ public class TestStorageMover {
     DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
 
     DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
-    HOT = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
-    WARM = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
-    COLD = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
+    HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
+    WARM = DEFAULT_POLICIES.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
+    COLD = DEFAULT_POLICIES.getPolicy(HdfsConstants.COLD_STORAGE_POLICY_NAME);
     TestBalancer.initTestSetup();
     Dispatcher.setDelayAfterErrors(1000L);
   }
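
For context: with the policy names now on the client-side HdfsConstants,
callers no longer need the server-only HdfsServerConstants to pick a storage
policy by name. A minimal sketch of client usage, not part of the patch (the
path and the assumption that fs.defaultFS points at HDFS are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    final class StoragePolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS namenode.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        // Pin /archive (illustrative path) to the all-ARCHIVE COLD policy.
        dfs.setStoragePolicy(new Path("/archive"),
            HdfsConstants.COLD_STORAGE_POLICY_NAME);
      }
    }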


[13/14] hadoop git commit: Merge branch 'trunk' into HADOOP-12111

Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d80b512
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d80b512
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d80b512

Branch: refs/heads/HADOOP-12111
Commit: 3d80b51245a18e05b6b48e65c96428e1ab97def8
Parents: 03335bb ddc867ce
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Jul 29 18:26:43 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Jul 29 18:26:43 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   6 +
 .../main/java/org/apache/hadoop/fs/FsShell.java |  11 +-
 .../hadoop/fs/permission/UmaskParser.java       |   2 +-
 .../src/main/resources/core-default.xml         |  12 ++
 .../hadoop-common/src/site/markdown/Metrics.md  |   1 -
 .../fs/FileContextMainOperationsBaseTest.java   |   4 +-
 .../java/org/apache/hadoop/fs/TestFsShell.java  |  24 +++
 .../apache/hadoop/tracing/SetSpanReceiver.java  |   2 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java     |   3 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  16 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |   1 +
 .../java/org/apache/hadoop/fs/SWebHdfs.java     |  51 ++++++
 .../main/java/org/apache/hadoop/fs/WebHdfs.java |  51 ++++++
 .../main/java/org/apache/hadoop/fs/package.html |  26 +++
 .../server/blockmanagement/BlockManager.java    |   4 -
 .../BlockStoragePolicySuite.java                |   6 +-
 .../blockmanagement/UnderReplicatedBlocks.java  |  33 +---
 .../hdfs/server/common/HdfsServerConstants.java |   3 -
 .../hdfs/server/namenode/FSNamesystem.java      |  15 +-
 .../src/main/webapps/hdfs/dfshealth.html        |  44 +++---
 .../src/main/webapps/hdfs/dfshealth.js          |  46 +++++-
 .../src/main/webapps/static/hadoop.css          |  51 ++++++
 .../src/main/webapps/static/moment.min.js       |   7 +
 .../TestSWebHdfsFileContextMainOperations.java  | 110 +++++++++++++
 .../TestWebHdfsFileContextMainOperations.java   | 157 +++++++++++++++++++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |   2 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java     | 118 ++++++++++----
 .../apache/hadoop/hdfs/TestDecommission.java    |  98 ++++++++++++
 .../org/apache/hadoop/hdfs/TestReplication.java |  26 ++-
 .../TestUnderReplicatedBlocks.java              |  48 ------
 .../hdfs/server/mover/TestStorageMover.java     |   8 +-
 .../apache/hadoop/security/TestPermission.java  |  26 +++
 hadoop-yarn-project/CHANGES.txt                 |   9 ++
 .../distributedshell/ApplicationMaster.java     |  31 +++-
 .../distributedshell/TestDSAppMaster.java       |  11 +-
 .../hadoop/yarn/event/AsyncDispatcher.java      |   3 +-
 .../nodelabels/FileSystemNodeLabelsStore.java   |   8 +-
 .../hadoop/yarn/util/resource/Resources.java    |  14 +-
 .../scheduler/fair/FSAppAttempt.java            |   4 +-
 39 files changed, 907 insertions(+), 185 deletions(-)
----------------------------------------------------------------------



[14/14] hadoop git commit: HADOOP-12204. releasedocmaker should pass pylint (Kengo Seki via aw)

Posted by aw...@apache.org.
HADOOP-12204. releasedocmaker should pass pylint (Kengo Seki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42d0c0fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42d0c0fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42d0c0fa

Branch: refs/heads/HADOOP-12111
Commit: 42d0c0fac1c2b9d1822193762512e86a442ee08e
Parents: 3d80b51
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Jul 29 18:27:21 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Jul 29 18:27:21 2015 -0700

----------------------------------------------------------------------
 dev-support/releasedocmaker.py | 1015 ++++++++++++++++++-----------------
 1 file changed, 511 insertions(+), 504 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42d0c0fa/dev-support/releasedocmaker.py
----------------------------------------------------------------------
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index c59ae99..37bd58a 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -19,21 +19,20 @@
 from glob import glob
 from optparse import OptionParser
 from time import gmtime, strftime
-import pprint
 import os
 import re
 import sys
 import urllib
 import urllib2
 try:
-  import json
+    import json
 except ImportError:
-  import simplejson as json
+    import simplejson as json
 
-releaseVersion={}
-namePattern = re.compile(r' \([0-9]+\)')
+RELEASE_VERSION = {}
+NAME_PATTERN = re.compile(r' \([0-9]+\)')
 
-asflicense='''
+ASF_LICENSE = '''
 <!---
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -53,524 +52,532 @@ asflicense='''
 -->
 '''
 
-def clean(str):
-  return tableclean(re.sub(namePattern, "", str))
+def clean(_str):
+    return tableclean(re.sub(NAME_PATTERN, "", _str))
 
-def formatComponents(str):
-  str = re.sub(namePattern, '', str).replace("'", "")
-  if str != "":
-    ret = str
-  else:
-    # some markdown parsers don't like empty tables
-    ret = "."
-  return clean(ret)
+def format_components(_str):
+    _str = re.sub(NAME_PATTERN, '', _str).replace("'", "")
+    if _str != "":
+        ret = _str
+    else:
+        # some markdown parsers don't like empty tables
+        ret = "."
+    return clean(ret)
 
 # convert to utf-8
 # protect some known md metachars
 # or chars that screw up doxia
-def tableclean(str):
-  str=str.encode('utf-8')
-  str=str.replace("_","\_")
-  str=str.replace("\r","")
-  str=str.rstrip()
-  return str
+def tableclean(_str):
+    _str = _str.encode('utf-8')
+    _str = _str.replace("_", r"\_")
+    _str = _str.replace("\r", "")
+    _str = _str.rstrip()
+    return _str
 
 # same thing as tableclean,
 # except table metachars are also
 # escaped as well as more
 # things we don't want doxia to
 # screw up
-def notableclean(str):
-  str=tableclean(str)
-  str=str.replace("|","\|")
-  str=str.replace("<","\<")
-  str=str.replace(">","\>")
-  str=str.replace("*","\*")
-  str=str.rstrip()
-  return str
+def notableclean(_str):
+    _str = tableclean(_str)
+    _str = _str.replace("|", r"\|")
+    _str = _str.replace("<", r"\<")
+    _str = _str.replace(">", r"\>")
+    _str = _str.replace("*", r"\*")
+    _str = _str.rstrip()
+    return _str
 
 # clean output dir
-def cleanOutputDir(dir):
-    files = os.listdir(dir)
+def clean_output_dir(directory):
+    files = os.listdir(directory)
     for name in files:
-        os.remove(os.path.join(dir,name))
-    os.rmdir(dir)
+        os.remove(os.path.join(directory, name))
+    os.rmdir(directory)
 
 def mstr(obj):
-  if (obj is None):
-    return ""
-  return unicode(obj)
-
-def buildindex(title,license):
-  versions=reversed(sorted(glob("[0-9]*.[0-9]*.[0-9]*")))
-  with open("index.md","w") as indexfile:
-    if license is True:
-      indexfile.write(asflicense)
-    for v in versions:
-      indexfile.write("* %s v%s\n" % (title,v))
-      for k in ("Changes","Release Notes"):
-        indexfile.write("    * %s (%s/%s.%s.html)\n" \
-          % (k,v,k.upper().replace(" ",""),v))
-  indexfile.close()
-
-class GetVersions:
-  """ yo """
-  def __init__(self,versions, projects):
-    versions = versions
-    projects = projects
-    self.newversions = []
-    pp = pprint.PrettyPrinter(indent=4)
-    at=0
-    end=1
-    count=100
-    versions.sort()
-    print "Looking for %s through %s"%(versions[0],versions[-1])
-    for p in projects:
-      resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/project/%s/versions"%p)
-      data = json.loads(resp.read())
-      for d in data:
-        if d['name'][0].isdigit and versions[0] <= d['name'] and d['name'] <= versions[-1]:
-          print "Adding %s to the list" % d['name']
-          self.newversions.append(d['name'])
-    newlist=list(set(self.newversions))
-    self.newversions=newlist
-
-  def getlist(self):
-      pp = pprint.PrettyPrinter(indent=4)
-      return(self.newversions)
-
-class Version:
-  """Represents a version number"""
-  def __init__(self, data):
-    self.mod = False
-    self.data = data
-    found = re.match('^((\d+)(\.\d+)*).*$', data)
-    if (found):
-      self.parts = [ int(p) for p in found.group(1).split('.') ]
-    else:
-      self.parts = []
-    # backfill version with zeroes if missing parts
-    self.parts.extend((0,) * (3 - len(self.parts)))
-
-  def __str__(self):
-    if (self.mod):
-      return '.'.join([ str(p) for p in self.parts ])
-    return self.data
-
-  def __cmp__(self, other):
-    return cmp(self.parts, other.parts)
-
-class Jira:
-  """A single JIRA"""
-
-  def __init__(self, data, parent):
-    self.key = data['key']
-    self.fields = data['fields']
-    self.parent = parent
-    self.notes = None
-    self.incompat = None
-    self.reviewed = None
-
-  def getId(self):
-    return mstr(self.key)
-
-  def getDescription(self):
-    return mstr(self.fields['description'])
-
-  def getReleaseNote(self):
-    if (self.notes is None):
-      field = self.parent.fieldIdMap['Release Note']
-      if (self.fields.has_key(field)):
-        self.notes=mstr(self.fields[field])
-      else:
-        self.notes=self.getDescription()
-    return self.notes
-
-  def getPriority(self):
-    ret = ""
-    pri = self.fields['priority']
-    if(pri is not None):
-      ret = pri['name']
-    return mstr(ret)
-
-  def getAssignee(self):
-    ret = ""
-    mid = self.fields['assignee']
-    if(mid is not None):
-      ret = mid['displayName']
-    return mstr(ret)
-
-  def getComponents(self):
-    if (len(self.fields['components'])>0):
-      return ", ".join([ comp['name'] for comp in self.fields['components'] ])
-    else:
-      return ""
-
-  def getSummary(self):
-    return self.fields['summary']
-
-  def getType(self):
-    ret = ""
-    mid = self.fields['issuetype']
-    if(mid is not None):
-      ret = mid['name']
-    return mstr(ret)
-
-  def getReporter(self):
-    ret = ""
-    mid = self.fields['reporter']
-    if(mid is not None):
-      ret = mid['displayName']
-    return mstr(ret)
-
-  def getProject(self):
-    ret = ""
-    mid = self.fields['project']
-    if(mid is not None):
-      ret = mid['key']
-    return mstr(ret)
-
-  def __cmp__(self,other):
-    selfsplit=self.getId().split('-')
-    othersplit=other.getId().split('-')
-    v1=cmp(selfsplit[0],othersplit[0])
-    if (v1!=0):
-      return v1
-    else:
-      if selfsplit[1] < othersplit[1]:
+    if obj is None:
+        return ""
+    return unicode(obj)
+
+def buildindex(title, asf_license):
+    versions = reversed(sorted(glob("[0-9]*.[0-9]*.[0-9]*")))
+    with open("index.md", "w") as indexfile:
+        if asf_license is True:
+            indexfile.write(ASF_LICENSE)
+        for version in versions:
+            indexfile.write("* %s v%s\n" % (title, version))
+            for k in ("Changes", "Release Notes"):
+                indexfile.write("    * %s (%s/%s.%s.html)\n" \
+                    % (k, version, k.upper().replace(" ", ""), version))
+    indexfile.close()
+
+class GetVersions(object):
+    """ yo """
+    def __init__(self, versions, projects):
+        versions = versions
+        projects = projects
+        self.newversions = []
+        versions.sort()
+        print "Looking for %s through %s"%(versions[0], versions[-1])
+        for project in projects:
+            url = "https://issues.apache.org/jira/rest/api/2/project/%s/versions" % project
+            resp = urllib2.urlopen(url)
+            datum = json.loads(resp.read())
+            for data in datum:
+                name = data['name']
+                if name[0].isdigit() and versions[0] <= name and name <= versions[-1]:
+                    print "Adding %s to the list" % name
+                    self.newversions.append(name)
+        newlist = list(set(self.newversions))
+        self.newversions = newlist
+
+    def getlist(self):
+        return self.newversions
+
+class Version(object):
+    """Represents a version number"""
+    def __init__(self, data):
+        self.mod = False
+        self.data = data
+        found = re.match(r'^((\d+)(\.\d+)*).*$', data)
+        if found:
+            self.parts = [int(p) for p in found.group(1).split('.')]
+        else:
+            self.parts = []
+        # backfill version with zeroes if missing parts
+        self.parts.extend((0,) * (3 - len(self.parts)))
+
+    def __str__(self):
+        if self.mod:
+            return '.'.join([str(p) for p in self.parts])
+        return self.data
+
+    def __cmp__(self, other):
+        return cmp(self.parts, other.parts)
+
+class Jira(object):
+    """A single JIRA"""
+
+    def __init__(self, data, parent):
+        self.key = data['key']
+        self.fields = data['fields']
+        self.parent = parent
+        self.notes = None
+        self.incompat = None
+        self.reviewed = None
+
+    def get_id(self):
+        return mstr(self.key)
+
+    def get_description(self):
+        return mstr(self.fields['description'])
+
+    def get_release_note(self):
+        if self.notes is None:
+            field = self.parent.field_id_map['Release Note']
+            if self.fields.has_key(field):
+                self.notes = mstr(self.fields[field])
+            else:
+                self.notes = self.get_description()
+        return self.notes
+
+    def get_priority(self):
+        ret = ""
+        pri = self.fields['priority']
+        if pri is not None:
+            ret = pri['name']
+        return mstr(ret)
+
+    def get_assignee(self):
+        ret = ""
+        mid = self.fields['assignee']
+        if mid is not None:
+            ret = mid['displayName']
+        return mstr(ret)
+
+    def get_components(self):
+        if len(self.fields['components']) > 0:
+            return ", ".join([comp['name'] for comp in self.fields['components']])
+        else:
+            return ""
+
+    def get_summary(self):
+        return self.fields['summary']
+
+    def get_type(self):
+        ret = ""
+        mid = self.fields['issuetype']
+        if mid is not None:
+            ret = mid['name']
+        return mstr(ret)
+
+    def get_reporter(self):
+        ret = ""
+        mid = self.fields['reporter']
+        if mid is not None:
+            ret = mid['displayName']
+        return mstr(ret)
+
+    def get_project(self):
+        ret = ""
+        mid = self.fields['project']
+        if mid is not None:
+            ret = mid['key']
+        return mstr(ret)
+
+    def __cmp__(self, other):
+        selfsplit = self.get_id().split('-')
+        othersplit = other.get_id().split('-')
+        result = cmp(selfsplit[0], othersplit[0])
+        if result != 0:
+            return result
+        else:
+            if selfsplit[1] < othersplit[1]:
+                return True
+            elif selfsplit[1] > othersplit[1]:
+                return False
+        return False
+
+    def get_incompatible_change(self):
+        if self.incompat is None:
+            field = self.parent.field_id_map['Hadoop Flags']
+            self.reviewed = False
+            self.incompat = False
+            if self.fields.has_key(field):
+                if self.fields[field]:
+                    for flag in self.fields[field]:
+                        if flag['value'] == "Incompatible change":
+                            self.incompat = True
+                        if flag['value'] == "Reviewed":
+                            self.reviewed = True
+        return self.incompat
+
+    def check_missing_component(self):
+        if len(self.fields['components']) > 0:
+            return False
         return True
-      elif selfsplit[1] > othersplit[1]:
+
+    def check_missing_assignee(self):
+        if self.fields['assignee'] is not None:
+            return False
+        return True
+
+    def check_version_string(self):
+        field = self.parent.field_id_map['Fix Version/s']
+        for ver in self.fields[field]:
+            found = re.match(r'^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', ver['name'])
+            if not found:
+                return True
         return False
-    return False
-
-  def getIncompatibleChange(self):
-    if (self.incompat is None):
-      field = self.parent.fieldIdMap['Hadoop Flags']
-      self.reviewed=False
-      self.incompat=False
-      if (self.fields.has_key(field)):
-        if self.fields[field]:
-          for hf in self.fields[field]:
-            if hf['value'] == "Incompatible change":
-              self.incompat=True
-            if hf['value'] == "Reviewed":
-              self.reviewed=True
-    return self.incompat
-
-  def checkMissingComponent(self):
-      if (len(self.fields['components'])>0):
-          return False
-      return True
-
-  def checkMissingAssignee(self):
-      if (self.fields['assignee'] is not None):
-          return False
-      return True
-
-  def checkVersionString(self):
-      field = self.parent.fieldIdMap['Fix Version/s']
-      for h in self.fields[field]:
-          found = re.match('^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', h['name'])
-          if not found:
-              return True
-      return False
-
-  def getReleaseDate(self,version):
-    for j in range(len(self.fields['fixVersions'])):
-      if self.fields['fixVersions'][j]==version:
-        return(self.fields['fixVersions'][j]['releaseDate'])
-    return None
-
-class JiraIter:
-  """An Iterator of JIRAs"""
-
-  def __init__(self, version, projects):
-    self.version = version
-    self.projects = projects
-    v=str(version).replace("-SNAPSHOT","")
-
-    resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/field")
-    data = json.loads(resp.read())
-
-    self.fieldIdMap = {}
-    for part in data:
-      self.fieldIdMap[part['name']] = part['id']
-
-    self.jiras = []
-    at=0
-    end=1
-    count=100
-    while (at < end):
-      params = urllib.urlencode({'jql': "project in ('"+"' , '".join(projects)+"') and fixVersion in ('"+v+"') and resolution = Fixed", 'startAt':at, 'maxResults':count})
-      resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
-      data = json.loads(resp.read())
-      if (data.has_key('errorMessages')):
-        raise Exception(data['errorMessages'])
-      at = data['startAt'] + data['maxResults']
-      end = data['total']
-      self.jiras.extend(data['issues'])
-
-      needaversion=False
-      if v not in releaseVersion:
-        needaversion=True
-
-      if needaversion is True:
-        for i in range(len(data['issues'])):
-          for j in range(len(data['issues'][i]['fields']['fixVersions'])):
-            if 'releaseDate' in data['issues'][i]['fields']['fixVersions'][j]:
-              releaseVersion[data['issues'][i]['fields']['fixVersions'][j]['name']]=\
-                             data['issues'][i]['fields']['fixVersions'][j]['releaseDate']
-
-    self.iter = self.jiras.__iter__()
-
-  def __iter__(self):
-    return self
-
-  def next(self):
-    data = self.iter.next()
-    j = Jira(data, self)
-    return j
-
-class Outputs:
-  """Several different files to output to at the same time"""
-
-  def __init__(self, base_file_name, file_name_pattern, keys, params={}):
-    self.params = params
-    self.base = open(base_file_name%params, 'w')
-    self.others = {}
-    for key in keys:
-      both = dict(params)
-      both['key'] = key
-      self.others[key] = open(file_name_pattern%both, 'w')
-
-  def writeAll(self, pattern):
-    both = dict(self.params)
-    both['key'] = ''
-    self.base.write(pattern%both)
-    for key in self.others.keys():
-      both = dict(self.params)
-      both['key'] = key
-      self.others[key].write(pattern%both)
-
-  def writeKeyRaw(self, key, str):
-    self.base.write(str)
-    if (self.others.has_key(key)):
-      self.others[key].write(str)
-
-  def close(self):
-    self.base.close()
-    for fd in self.others.values():
-      fd.close()
-
-  def writeList(self, mylist):
-    for jira in sorted(mylist):
-      line = '| [%s](https://issues.apache.org/jira/browse/%s) | %s |  %s | %s | %s | %s |\n' \
-        % (notableclean(jira.getId()), notableclean(jira.getId()),
-           notableclean(jira.getSummary()),
-           notableclean(jira.getPriority()),
-           formatComponents(jira.getComponents()),
-           notableclean(jira.getReporter()),
-           notableclean(jira.getAssignee()))
-      self.writeKeyRaw(jira.getProject(), line)
+
+    def get_release_date(self, version):
+        fix_versions = self.fields['fixVersions']
+        for j in range(len(fix_versions)):
+            if fix_versions[j] == version:
+                return fix_versions[j]['releaseDate']
+        return None
+
+class JiraIter(object):
+    """An Iterator of JIRAs"""
+
+    def __init__(self, version, projects):
+        self.version = version
+        self.projects = projects
+        ver = str(version).replace("-SNAPSHOT", "")
+
+        resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/field")
+        data = json.loads(resp.read())
+
+        self.field_id_map = {}
+        for part in data:
+            self.field_id_map[part['name']] = part['id']
+
+        self.jiras = []
+        pos = 0
+        end = 1
+        count = 100
+        while pos < end:
+            pjs = "','".join(projects)
+            jql = "project in ('%s') and fixVersion in ('%s') and resolution = Fixed" % (pjs, ver)
+            params = urllib.urlencode({'jql': jql, 'startAt':pos, 'maxResults':count})
+            resp = urllib2.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s" % params)
+            data = json.loads(resp.read())
+            if data.has_key('errorMessages'):
+                raise Exception(data['errorMessages'])
+            pos = data['startAt'] + data['maxResults']
+            end = data['total']
+            self.jiras.extend(data['issues'])
+
+            needaversion = False
+            if ver not in RELEASE_VERSION:
+                needaversion = True
+
+            if needaversion is True:
+                issues = data['issues']
+                for i in range(len(issues)):
+                    fix_versions = issues[i]['fields']['fixVersions']
+                    for j in range(len(fix_versions)):
+                        fields = fix_versions[j]
+                        if 'releaseDate' in fields:
+                            RELEASE_VERSION[fields['name']] = fields['releaseDate']
+
+        self.iter = self.jiras.__iter__()
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        data = self.iter.next()
+        j = Jira(data, self)
+        return j
+
+class Outputs(object):
+    """Several different files to output to at the same time"""
+
+    def __init__(self, base_file_name, file_name_pattern, keys, params=None):
+        if params is None:
+            params = {}
+        self.params = params
+        self.base = open(base_file_name%params, 'w')
+        self.others = {}
+        for key in keys:
+            both = dict(params)
+            both['key'] = key
+            self.others[key] = open(file_name_pattern%both, 'w')
+
+    def write_all(self, pattern):
+        both = dict(self.params)
+        both['key'] = ''
+        self.base.write(pattern%both)
+        for key in self.others.keys():
+            both = dict(self.params)
+            both['key'] = key
+            self.others[key].write(pattern%both)
+
+    def write_key_raw(self, key, _str):
+        self.base.write(_str)
+        if self.others.has_key(key):
+            self.others[key].write(_str)
+
+    def close(self):
+        self.base.close()
+        for value in self.others.values():
+            value.close()
+
+    def write_list(self, mylist):
+        for jira in sorted(mylist):
+            line = '| [%s](https://issues.apache.org/jira/browse/%s) | %s |  %s | %s | %s | %s |\n'
+            line = line % (notableclean(jira.get_id()),
+                           notableclean(jira.get_id()),
+                           notableclean(jira.get_summary()),
+                           notableclean(jira.get_priority()),
+                           format_components(jira.get_components()),
+                           notableclean(jira.get_reporter()),
+                           notableclean(jira.get_assignee()))
+            self.write_key_raw(jira.get_project(), line)
 
 def main():
-  parser = OptionParser(usage="usage: %prog --project PROJECT [--project PROJECT] --version VERSION [--version VERSION2 ...]",
-		epilog=
-               "Markdown-formatted CHANGES and RELEASENOTES files will be stored in a directory"
-               " named after the highest version provided.")
-  parser.add_option("-i","--index", dest="index", action="store_true",
-             default=False, help="build an index file")
-  parser.add_option("-l","--license", dest="license", action="store_false",
-             default=True, help="Add an ASF license")
-  parser.add_option("-n","--lint", dest="lint", action="store_true",
-             help="use lint flag to exit on failures")
-  parser.add_option("-p", "--project", dest="projects",
-             action="append", type="string",
-             help="projects in JIRA to include in releasenotes", metavar="PROJECT")
-  parser.add_option("-r", "--range", dest="range", action="store_true",
-             default=False, help="Given versions are a range")
-  parser.add_option("-t", "--projecttitle", dest="title",
-             type="string",
-             help="Title to use for the project (default is Apache PROJECT)")
-  parser.add_option("-u","--usetoday", dest="usetoday", action="store_true",
-             default=False, help="use current date for unreleased versions")
-  parser.add_option("-v", "--version", dest="versions",
-             action="append", type="string",
-             help="versions in JIRA to include in releasenotes", metavar="VERSION")
-  (options, args) = parser.parse_args()
-
-  if (options.versions is None):
-    parser.error("At least one version needs to be supplied")
-
-  proxy = urllib2.ProxyHandler()
-  opener = urllib2.build_opener(proxy)
-  urllib2.install_opener(opener)
-
-  projects = options.projects
-  if projects is None:
-    parser.error("At least one project needs to be supplied")
-
-  if (options.range is True):
-    versions = [ Version(v) for v in GetVersions(options.versions, projects).getlist() ]
-  else:
-    versions = [ Version(v) for v in options.versions ]
-  versions.sort();
-
-  if (options.title is None):
-    title=projects[0]
-  else:
-    title=options.title
-
-  haderrors=False
-
-  for v in versions:
-    vstr=str(v)
-    jlist = JiraIter(vstr,projects)
-
-    if vstr in releaseVersion:
-      reldate=releaseVersion[vstr]
-    elif options.usetoday:
-      reldate=strftime("%Y-%m-%d", gmtime())
+    usage = "usage: %prog --project PROJECT [--project PROJECT] --version VERSION [--version VERSION2 ...]"
+    parser = OptionParser(usage=usage,
+                          epilog="Markdown-formatted CHANGES and RELEASENOTES files will be stored"
+                                 "in a directory named after the highest version provided.")
+    parser.add_option("-i", "--index", dest="index", action="store_true",
+                      default=False, help="build an index file")
+    parser.add_option("-l", "--license", dest="license", action="store_false",
+                      default=True, help="Add an ASF license")
+    parser.add_option("-n", "--lint", dest="lint", action="store_true",
+                      help="use lint flag to exit on failures")
+    parser.add_option("-p", "--project", dest="projects",
+                      action="append", type="string",
+                      help="projects in JIRA to include in releasenotes", metavar="PROJECT")
+    parser.add_option("-r", "--range", dest="range", action="store_true",
+                      default=False, help="Given versions are a range")
+    parser.add_option("-t", "--projecttitle", dest="title", type="string",
+                      help="Title to use for the project (default is Apache PROJECT)")
+    parser.add_option("-u", "--usetoday", dest="usetoday", action="store_true",
+                      default=False, help="use current date for unreleased versions")
+    parser.add_option("-v", "--version", dest="versions", action="append", type="string",
+                      help="versions in JIRA to include in releasenotes", metavar="VERSION")
+    (options, _) = parser.parse_args()
+
+    if options.versions is None:
+        parser.error("At least one version needs to be supplied")
+
+    proxy = urllib2.ProxyHandler()
+    opener = urllib2.build_opener(proxy)
+    urllib2.install_opener(opener)
+
+    projects = options.projects
+    if projects is None:
+        parser.error("At least one project needs to be supplied")
+
+    if options.range is True:
+        versions = [Version(v) for v in GetVersions(options.versions, projects).getlist()]
+    else:
+        versions = [Version(v) for v in options.versions]
+    versions.sort()
+
+    if options.title is None:
+        title = projects[0]
     else:
-      reldate="Unreleased"
-
-    if not os.path.exists(vstr):
-      os.mkdir(vstr)
-
-    reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md",
-      "%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md",
-      [], {"ver":v, "date":reldate, "title":title})
-    choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md",
-      "%(ver)s/CHANGES.%(key)s.%(ver)s.md",
-      [], {"ver":v, "date":reldate, "title":title})
-
-    if (options.license is True):
-      reloutputs.writeAll(asflicense)
-      choutputs.writeAll(asflicense)
-
-    relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \
-      'These release notes cover new developer and user-facing incompatibilities, features, and major improvements.\n\n'
-    chhead = '# %(title)s Changelog\n\n' \
-      '## Release %(ver)s - %(date)s\n'\
-      '\n'
-
-    reloutputs.writeAll(relhead)
-    choutputs.writeAll(chhead)
-    errorCount=0
-    warningCount=0
-    lintMessage=""
-    incompatlist=[]
-    buglist=[]
-    improvementlist=[]
-    newfeaturelist=[]
-    subtasklist=[]
-    tasklist=[]
-    testlist=[]
-    otherlist=[]
-
-    for jira in sorted(jlist):
-      if jira.getIncompatibleChange():
-        incompatlist.append(jira)
-      elif jira.getType() == "Bug":
-        buglist.append(jira)
-      elif jira.getType() == "Improvement":
-        improvementlist.append(jira)
-      elif jira.getType() == "New Feature":
-        newfeaturelist.append(jira)
-      elif jira.getType() == "Sub-task":
-        subtasklist.append(jira)
-      elif jira.getType() == "Task":
-       tasklist.append(jira)
-      elif jira.getType() == "Test":
-        testlist.append(jira)
-      else:
-         otherlist.append(jira)
-
-      line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \
-          % (notableclean(jira.getId()), notableclean(jira.getId()), notableclean(jira.getPriority()),
-             notableclean(jira.getSummary()))
-
-      if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0):
-        warningCount+=1
-        reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
-        reloutputs.writeKeyRaw(jira.getProject(), line)
-        line ='\n**WARNING: No release note provided for this incompatible change.**\n\n'
-        lintMessage += "\nWARNING: incompatible change %s lacks release notes." % (notableclean(jira.getId()))
-        reloutputs.writeKeyRaw(jira.getProject(), line)
-
-      if jira.checkVersionString():
-          warningCount+=1
-          lintMessage += "\nWARNING: Version string problem for %s " % jira.getId()
-
-      if (jira.checkMissingComponent() or jira.checkMissingAssignee()):
-          errorCount+=1
-          errorMessage=[]
-          jira.checkMissingComponent() and errorMessage.append("component")
-          jira.checkMissingAssignee() and errorMessage.append("assignee")
-          lintMessage += "\nERROR: missing %s for %s " %  (" and ".join(errorMessage) , jira.getId())
-
-      if (len(jira.getReleaseNote())>0):
-        reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
-        reloutputs.writeKeyRaw(jira.getProject(), line)
-        line ='\n%s\n\n' % (tableclean(jira.getReleaseNote()))
-        reloutputs.writeKeyRaw(jira.getProject(), line)
-
-    if (options.lint is True):
-        print lintMessage
-        print "======================================="
-        print "%s: Error:%d, Warning:%d \n" % (vstr, errorCount, warningCount)
-        if (errorCount>0):
-           haderrors=True
-           cleanOutputDir(vstr)
-           continue
-
-    reloutputs.writeAll("\n\n")
-    reloutputs.close()
-
-    choutputs.writeAll("### INCOMPATIBLE CHANGES:\n\n")
-    choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
-    choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
-    choutputs.writeList(incompatlist)
-
-    choutputs.writeAll("\n\n### NEW FEATURES:\n\n")
-    choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
-    choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
-    choutputs.writeList(newfeaturelist)
-
-    choutputs.writeAll("\n\n### IMPROVEMENTS:\n\n")
-    choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
-    choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
-    choutputs.writeList(improvementlist)
-
-    choutputs.writeAll("\n\n### BUG FIXES:\n\n")
-    choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
-    choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
-    choutputs.writeList(buglist)
-
-    choutputs.writeAll("\n\n### TESTS:\n\n")
-    choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
-    choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
-    choutputs.writeList(testlist)
-
-    choutputs.writeAll("\n\n### SUB-TASKS:\n\n")
-    choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
-    choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
-    choutputs.writeList(subtasklist)
-
-    choutputs.writeAll("\n\n### OTHER:\n\n")
-    choutputs.writeAll("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
-    choutputs.writeAll("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
-    choutputs.writeList(otherlist)
-    choutputs.writeList(tasklist)
-
-    choutputs.writeAll("\n\n")
-    choutputs.close()
-
-  if options.index:
-    buildindex(title,options.license)
-
-  if haderrors is True:
-    sys.exit(1)
+        title = options.title
+
+    haderrors = False
+
+    for version in versions:
+        vstr = str(version)
+        jlist = JiraIter(vstr, projects)
+
+        if vstr in RELEASE_VERSION:
+            reldate = RELEASE_VERSION[vstr]
+        elif options.usetoday:
+            reldate = strftime("%Y-%m-%d", gmtime())
+        else:
+            reldate = "Unreleased"
+
+        if not os.path.exists(vstr):
+            os.mkdir(vstr)
+
+        reloutputs = Outputs("%(ver)s/RELEASENOTES.%(ver)s.md",
+                             "%(ver)s/RELEASENOTES.%(key)s.%(ver)s.md",
+                             [], {"ver":version, "date":reldate, "title":title})
+        choutputs = Outputs("%(ver)s/CHANGES.%(ver)s.md",
+                            "%(ver)s/CHANGES.%(key)s.%(ver)s.md",
+                            [], {"ver":version, "date":reldate, "title":title})
+
+        if options.license is True:
+            reloutputs.write_all(ASF_LICENSE)
+            choutputs.write_all(ASF_LICENSE)
+
+        relhead = '# %(title)s %(key)s %(ver)s Release Notes\n\n' \
+                  'These release notes cover new developer and user-facing ' \
+                  'incompatibilities, features, and major improvements.\n\n'
+        chhead = '# %(title)s Changelog\n\n' \
+                 '## Release %(ver)s - %(date)s\n'\
+                 '\n'
+
+        reloutputs.write_all(relhead)
+        choutputs.write_all(chhead)
+        error_count = 0
+        warning_count = 0
+        lint_message = ""
+        incompatlist = []
+        buglist = []
+        improvementlist = []
+        newfeaturelist = []
+        subtasklist = []
+        tasklist = []
+        testlist = []
+        otherlist = []
+
+        for jira in sorted(jlist):
+            if jira.get_incompatible_change():
+                incompatlist.append(jira)
+            elif jira.get_type() == "Bug":
+                buglist.append(jira)
+            elif jira.get_type() == "Improvement":
+                improvementlist.append(jira)
+            elif jira.get_type() == "New Feature":
+                newfeaturelist.append(jira)
+            elif jira.get_type() == "Sub-task":
+                subtasklist.append(jira)
+            elif jira.get_type() == "Task":
+                tasklist.append(jira)
+            elif jira.get_type() == "Test":
+                testlist.append(jira)
+            else:
+                otherlist.append(jira)
+
+            line = '* [%s](https://issues.apache.org/jira/browse/%s) | *%s* | **%s**\n' \
+                   % (notableclean(jira.get_id()), notableclean(jira.get_id()),
+                      notableclean(jira.get_priority()), notableclean(jira.get_summary()))
+
+            if jira.get_incompatible_change() and len(jira.get_release_note()) == 0:
+                warning_count += 1
+                reloutputs.write_key_raw(jira.get_project(), "\n---\n\n")
+                reloutputs.write_key_raw(jira.get_project(), line)
+                line = '\n**WARNING: No release note provided for this incompatible change.**\n\n'
+                lint_message += "\nWARNING: incompatible change %s lacks release notes." % \
+                                (notableclean(jira.get_id()))
+                reloutputs.write_key_raw(jira.get_project(), line)
+
+            if jira.check_version_string():
+                warning_count += 1
+                lint_message += "\nWARNING: Version string problem for %s " % jira.get_id()
+
+            if jira.check_missing_component() or jira.check_missing_assignee():
+                error_count += 1
+                error_message = []
+                if jira.check_missing_component():
+                    error_message.append("component")
+                if jira.check_missing_assignee():
+                    error_message.append("assignee")
+                lint_message += "\nERROR: missing %s for %s " \
+                                % (" and ".join(error_message), jira.get_id())
+
+            if len(jira.get_release_note()) > 0:
+                reloutputs.write_key_raw(jira.get_project(), "\n---\n\n")
+                reloutputs.write_key_raw(jira.get_project(), line)
+                line = '\n%s\n\n' % (tableclean(jira.get_release_note()))
+                reloutputs.write_key_raw(jira.get_project(), line)
+
+        if options.lint is True:
+            print lint_message
+            print "======================================="
+            print "%s: Error:%d, Warning:%d \n" % (vstr, error_count, warning_count)
+            if error_count > 0:
+                haderrors = True
+                clean_output_dir(vstr)
+                continue
+
+        reloutputs.write_all("\n\n")
+        reloutputs.close()
+
+        choutputs.write_all("### INCOMPATIBLE CHANGES:\n\n")
+        choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
+        choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
+        choutputs.write_list(incompatlist)
+
+        choutputs.write_all("\n\n### NEW FEATURES:\n\n")
+        choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
+        choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
+        choutputs.write_list(newfeaturelist)
+
+        choutputs.write_all("\n\n### IMPROVEMENTS:\n\n")
+        choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
+        choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
+        choutputs.write_list(improvementlist)
+
+        choutputs.write_all("\n\n### BUG FIXES:\n\n")
+        choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
+        choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
+        choutputs.write_list(buglist)
+
+        choutputs.write_all("\n\n### TESTS:\n\n")
+        choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
+        choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
+        choutputs.write_list(testlist)
+
+        choutputs.write_all("\n\n### SUB-TASKS:\n\n")
+        choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
+        choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
+        choutputs.write_list(subtasklist)
+
+        choutputs.write_all("\n\n### OTHER:\n\n")
+        choutputs.write_all("| JIRA | Summary | Priority | Component | Reporter | Contributor |\n")
+        choutputs.write_all("|:---- |:---- | :--- |:---- |:---- |:---- |\n")
+        choutputs.write_list(otherlist)
+        choutputs.write_list(tasklist)
+
+        choutputs.write_all("\n\n")
+        choutputs.close()
+
+    if options.index:
+        buildindex(title, options.license)
+
+    if haderrors is True:
+        sys.exit(1)
 
 if __name__ == "__main__":
-  main()
+    main()
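
For anyone kicking the tires on the rewritten script, a small smoke test along
the following lines (illustrative only, not part of the commit; it assumes
Python 2, which the script's print statements and cmp() require, and that the
dev-support/ directory of a checkout has been put on sys.path) exercises the
Version ordering and markdown-escaping helpers shown above:

    # smoke test sketch for dev-support/releasedocmaker.py (hypothetical)
    import sys
    sys.path.insert(0, "dev-support")        # assumes running from the repo root
    from releasedocmaker import Version, notableclean

    # Version backfills missing parts with zeroes, so "2.8" sorts as 2.8.0
    assert sorted([Version("2.8.1"), Version("2.8")])[0].parts == [2, 8, 0]
    # str() preserves the original text (e.g. a -SNAPSHOT suffix) unless .mod is set
    assert str(Version("2.8.0-SNAPSHOT")) == "2.8.0-SNAPSHOT"
    # notableclean escapes the markdown/doxia metachars used in the changelog tables
    assert notableclean(u"a|b*c") == r"a\|b\*c"

A typical run, per the option parser above, looks like
"python dev-support/releasedocmaker.py --project HADOOP --version 2.8.0 --index";
the generated CHANGES and RELEASENOTES markdown ends up in a directory named
after the version (here 2.8.0/).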


[12/14] hadoop git commit: HDFS-8816. Improve visualization for the Datanode tab in the NN UI. Contributed by Haohui Mai.

Posted by aw...@apache.org.
HDFS-8816. Improve visualization for the Datanode tab in the NN UI. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddc867ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddc867ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddc867ce

Branch: refs/heads/HADOOP-12111
Commit: ddc867ceb9a76986e8379361753598cc48024376
Parents: c020b62
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Jul 23 10:25:32 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Jul 29 17:14:05 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |  1 +
 .../src/main/webapps/hdfs/dfshealth.html        | 44 +++++++++--------
 .../src/main/webapps/hdfs/dfshealth.js          | 46 ++++++++++++++++--
 .../src/main/webapps/static/hadoop.css          | 51 ++++++++++++++++++++
 .../src/main/webapps/static/moment.min.js       |  7 +++
 6 files changed, 126 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddc867ce/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cf03d3c..e7af2cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -760,6 +760,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8822. Add SSD storagepolicy tests in TestBlockStoragePolicy#
     testDefaultPolicies (vinayakumarb)
 
+    HDFS-8816. Improve visualization for the Datanode tab in the NN UI. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddc867ce/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index db38851..145a8cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -386,6 +386,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/contrib/**</exclude>
             <exclude>src/site/resources/images/*</exclude>
             <exclude>src/main/webapps/static/bootstrap-3.0.2/**</exclude>
+            <exclude>src/main/webapps/static/moment.min.js</exclude>
             <exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
             <exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
             <exclude>src/main/webapps/static/jquery-1.10.2.min.js</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddc867ce/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 8cdff84..6b48be7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -280,6 +280,14 @@
 
 <script type="text/x-dust-template" id="tmpl-datanode">
 <div class="page-header"><h1>Datanode Information</h1></div>
+<div>
+  <ul class="dfshealth-node-legend">
+    <li class="dfshealth-node-icon dfshealth-node-alive">In service</li>
+    <li class="dfshealth-node-icon dfshealth-node-down">Down</li>
+    <li class="dfshealth-node-icon dfshealth-node-decommisioned">Decommisioned</li>
+    <li class="dfshealth-node-icon dfshealth-node-down-decommisioned">Decommissioned &amp; dead</li>
+  </ul>
+</div>
 <div class="page-header"><h1><small>In operation</small></h1></div>
 <small>
 <table class="table">
@@ -287,41 +295,34 @@
     <tr>
       <th>Node</th>
       <th>Last contact</th>
-      <th>Admin State</th>
-      <th>Capacity</th>
-      <th>Used</th>
-      <th>Non DFS Used</th>
-      <th>Remaining</th>
+      <th style="width:180px; text-align:center">Capacity</th>
       <th>Blocks</th>
       <th>Block pool used</th>
-      <th>Failed Volumes</th>
       <th>Version</th>
     </tr>
   </thead>
   {#LiveNodes}
   <tr>
-    <td>{name} ({xferaddr})</td>
-    <td>{lastContact}</td>
-    <td>{adminState}</td>
-    <td>{capacity|fmt_bytes}</td>
-    <td>{used|fmt_bytes}</td>
-    <td>{nonDfsUsedSpace|fmt_bytes}</td>
-    <td>{remaining|fmt_bytes}</td>
+    <td class="dfshealth-node-icon dfshealth-node-{state}">{name} ({xferaddr})</td>
+    <td ng-value="{lastContact}">{#helper_relative_time value="{lastContact}"/}</td>
+    <td ng-value="{usedPercentage}">
+      <div>
+        <div style="display:inline-block; float: left; padding-right: 10px;">{capacity|fmt_bytes}</div>
+        <div class="clearfix progress dfshealth-node-capacity-bar" title="Non DFS: {nonDfsUsedSpace|fmt_bytes}, Used: {used|fmt_bytes}">
+          <div class="progress-bar {#helper_usage_bar value="{usedPercentage}"/}" style="width: {usedPercentage}%">
+          </div>
+        </div>
+      </div>
+    </td>
     <td>{numBlocks}</td>
     <td>{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})</td>
-    <td>{volfails}</td>
     <td>{version}</td>
   </tr>
   {/LiveNodes}
   {#DeadNodes}
   <tr class="danger">
-    <td>{name} ({xferaddr})</td>
-    <td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>
-    <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
-    <td>-</td>
-    <td>-</td>
-    <td>-</td>
-    <td>-</td>
+    <td class="dfshealth-node-icon dfshealth-node-{state}">{name} ({xferaddr})</td>
+    <td>{#helper_relative_time value="{lastContact}"/}</td>
     <td>-</td>
     <td>-</td>
     <td>-</td>
@@ -418,6 +419,7 @@ There are no reported volume failures.
 
 <script type="text/javascript" src="/static/jquery-1.10.2.min.js">
 </script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
+</script><script type="text/javascript" src="/static/moment.min.js">
 </script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
 </script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
 </script><script type="text/javascript" src="/static/dfs-dust.js">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddc867ce/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index 1c13493..de93854 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -165,12 +165,24 @@
   }
 
   function load_datanode_info() {
-
     var HELPERS = {
-      'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
+      'helper_relative_time' : function (chunk, ctx, bodies, params) {
         var value = dust.helpers.tap(params.value, chunk, ctx);
-        return chunk.write('' + new Date(Date.now()-1000*Number(value)));
-      }
+        return chunk.write(moment().subtract(Number(value), 'seconds').format('YYYY-MM-DD HH:mm:ss'));
+      },
+      'helper_usage_bar' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        var v = Number(value);
+        var r = null;
+        if (v < 70) {
+          r = 'progress-bar-success';
+        } else if (v < 85) {
+          r = 'progress-bar-warning';
+        } else {
+          r = "progress-bar-danger";
+        }
+        return chunk.write(r);
+      },
     };
 
     function workaround(r) {
@@ -184,8 +196,34 @@
         return res;
       }
 
+      function augment_live_nodes(nodes) {
+        for (var i = 0, e = nodes.length; i < e; ++i) {
+          var n = nodes[i];
+          n.usedPercentage = Math.round((n.used + n.nonDfsUsedSpace) * 1.0 / n.capacity * 100);
+          if (n.adminState === "In Service") {
+            n.state = "alive";
+          } else if (nodes[i].adminState === "Decommission In Progress") {
+            n.state = "decommisioning";
+          } else if (nodes[i].adminState === "Decommissioned") {
+            n.state = "decommissioned";
+          }
+        }
+      }
+
+      function augment_dead_nodes(nodes) {
+        for (var i = 0, e = nodes.length; i < e; ++i) {
+          if (nodes[i].decommissioned) {
+            nodes[i].state = "down-decommissioned";
+          } else {
+            nodes[i].state = "down";
+          }
+        }
+      }
+
       r.LiveNodes = node_map_to_array(JSON.parse(r.LiveNodes));
+      augment_live_nodes(r.LiveNodes);
       r.DeadNodes = node_map_to_array(JSON.parse(r.DeadNodes));
+      augment_dead_nodes(r.DeadNodes);
       r.DecomNodes = node_map_to_array(JSON.parse(r.DecomNodes));
       return r;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddc867ce/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 4b0e967..c13fe3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -212,4 +212,55 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
 #alert-panel {
   margin-top:20px;
   display: none;
+}
+
+.dfshealth-node-capacity-bar {
+    margin-bottom:0;
+    width: 60%;
+}
+
+.dfshealth-node-icon:before {
+    font-size: 10pt;
+    padding-right: 1pt;
+    font-family: 'Glyphicons Halflings';
+    font-style: normal;
+    font-weight: normal;
+    line-height: 1;
+    -webkit-font-smoothing: antialiased;
+    -moz-osx-font-smoothing: grayscale;
+}
+
+.dfshealth-node-alive:before {
+    color: #5fa341;
+    content: "\e013";
+}
+
+.dfshealth-node-decommisioned:before {
+    color: #eea236;
+    content: "\e136";
+}
+
+.dfshealth-node-down:before {
+    color: #c7254e;
+    content: "\e101";
+}
+
+.dfshealth-node-down-decommisioned:before {
+    color: #2e6da6;
+    content: "\e017";
+}
+
+.dfshealth-node-legend {
+    list-style-type: none;
+    text-align: right;
+}
+
+.dfshealth-node-legend li {
+    display: inline;
+    padding: 10pt;
+    padding-left: 10pt;
+}
+
+.dfshealth-node-legend li:before {
+    padding-right: 5pt;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddc867ce/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
new file mode 100644
index 0000000..05199bd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
@@ -0,0 +1,7 @@
+//! moment.js
+//! version : 2.10.3
+//! authors : Tim Wood, Iskren Chernev, Moment.js contributors
+//! license : MIT
+//! momentjs.com
+!function(a,b){"object"==typeof exports&&"undefined"!=typeof module?module.exports=b():"function"==typeof define&&define.amd?define(b):a.moment=b()}(this,function(){"use strict";function a(){return Dc.apply(null,arguments)}function b(a){Dc=a}function c(a){return"[object Array]"===Object.prototype.toString.call(a)}function d(a){return a instanceof Date||"[object Date]"===Object.prototype.toString.call(a)}function e(a,b){var c,d=[];for(c=0;c<a.length;++c)d.push(b(a[c],c));return d}function f(a,b){return Object.prototype.hasOwnProperty.call(a,b)}function g(a,b){for(var c in b)f(b,c)&&(a[c]=b[c]);return f(b,"toString")&&(a.toString=b.toString),f(b,"valueOf")&&(a.valueOf=b.valueOf),a}function h(a,b,c,d){return za(a,b,c,d,!0).utc()}function i(){return{empty:!1,unusedTokens:[],unusedInput:[],overflow:-2,charsLeftOver:0,nullInput:!1,invalidMonth:null,invalidFormat:!1,userInvalidated:!1,iso:!1}}function j(a){return null==a._pf&&(a._pf=i()),a._pf}function k(a){if(null==a._isValid){var b=j(a);
 a._isValid=!isNaN(a._d.getTime())&&b.overflow<0&&!b.empty&&!b.invalidMonth&&!b.nullInput&&!b.invalidFormat&&!b.userInvalidated,a._strict&&(a._isValid=a._isValid&&0===b.charsLeftOver&&0===b.unusedTokens.length&&void 0===b.bigHour)}return a._isValid}function l(a){var b=h(0/0);return null!=a?g(j(b),a):j(b).userInvalidated=!0,b}function m(a,b){var c,d,e;if("undefined"!=typeof b._isAMomentObject&&(a._isAMomentObject=b._isAMomentObject),"undefined"!=typeof b._i&&(a._i=b._i),"undefined"!=typeof b._f&&(a._f=b._f),"undefined"!=typeof b._l&&(a._l=b._l),"undefined"!=typeof b._strict&&(a._strict=b._strict),"undefined"!=typeof b._tzm&&(a._tzm=b._tzm),"undefined"!=typeof b._isUTC&&(a._isUTC=b._isUTC),"undefined"!=typeof b._offset&&(a._offset=b._offset),"undefined"!=typeof b._pf&&(a._pf=j(b)),"undefined"!=typeof b._locale&&(a._locale=b._locale),Fc.length>0)for(c in Fc)d=Fc[c],e=b[d],"undefined"!=typeof e&&(a[d]=e);return a}function n(b){m(this,b),this._d=new Date(+b._d),Gc===!1&&(Gc=!0,a.updateOff
 set(this),Gc=!1)}function o(a){return a instanceof n||null!=a&&null!=a._isAMomentObject}function p(a){var b=+a,c=0;return 0!==b&&isFinite(b)&&(c=b>=0?Math.floor(b):Math.ceil(b)),c}function q(a,b,c){var d,e=Math.min(a.length,b.length),f=Math.abs(a.length-b.length),g=0;for(d=0;e>d;d++)(c&&a[d]!==b[d]||!c&&p(a[d])!==p(b[d]))&&g++;return g+f}function r(){}function s(a){return a?a.toLowerCase().replace("_","-"):a}function t(a){for(var b,c,d,e,f=0;f<a.length;){for(e=s(a[f]).split("-"),b=e.length,c=s(a[f+1]),c=c?c.split("-"):null;b>0;){if(d=u(e.slice(0,b).join("-")))return d;if(c&&c.length>=b&&q(e,c,!0)>=b-1)break;b--}f++}return null}function u(a){var b=null;if(!Hc[a]&&"undefined"!=typeof module&&module&&module.exports)try{b=Ec._abbr,require("./locale/"+a),v(b)}catch(c){}return Hc[a]}function v(a,b){var c;return a&&(c="undefined"==typeof b?x(a):w(a,b),c&&(Ec=c)),Ec._abbr}function w(a,b){return null!==b?(b.abbr=a,Hc[a]||(Hc[a]=new r),Hc[a].set(b),v(a),Hc[a]):(delete Hc[a],null)}function x(a
 ){var b;if(a&&a._locale&&a._locale._abbr&&(a=a._locale._abbr),!a)return Ec;if(!c(a)){if(b=u(a))return b;a=[a]}return t(a)}function y(a,b){var c=a.toLowerCase();Ic[c]=Ic[c+"s"]=Ic[b]=a}function z(a){return"string"==typeof a?Ic[a]||Ic[a.toLowerCase()]:void 0}function A(a){var b,c,d={};for(c in a)f(a,c)&&(b=z(c),b&&(d[b]=a[c]));return d}function B(b,c){return function(d){return null!=d?(D(this,b,d),a.updateOffset(this,c),this):C(this,b)}}function C(a,b){return a._d["get"+(a._isUTC?"UTC":"")+b]()}function D(a,b,c){return a._d["set"+(a._isUTC?"UTC":"")+b](c)}function E(a,b){var c;if("object"==typeof a)for(c in a)this.set(c,a[c]);else if(a=z(a),"function"==typeof this[a])return this[a](b);return this}function F(a,b,c){for(var d=""+Math.abs(a),e=a>=0;d.length<b;)d="0"+d;return(e?c?"+":"":"-")+d}function G(a,b,c,d){var e=d;"string"==typeof d&&(e=function(){return this[d]()}),a&&(Mc[a]=e),b&&(Mc[b[0]]=function(){return F(e.apply(this,arguments),b[1],b[2])}),c&&(Mc[c]=function(){return this.l
 ocaleData().ordinal(e.apply(this,arguments),a)})}function H(a){return a.match(/\[[\s\S]/)?a.replace(/^\[|\]$/g,""):a.replace(/\\/g,"")}function I(a){var b,c,d=a.match(Jc);for(b=0,c=d.length;c>b;b++)Mc[d[b]]?d[b]=Mc[d[b]]:d[b]=H(d[b]);return function(e){var f="";for(b=0;c>b;b++)f+=d[b]instanceof Function?d[b].call(e,a):d[b];return f}}function J(a,b){return a.isValid()?(b=K(b,a.localeData()),Lc[b]||(Lc[b]=I(b)),Lc[b](a)):a.localeData().invalidDate()}function K(a,b){function c(a){return b.longDateFormat(a)||a}var d=5;for(Kc.lastIndex=0;d>=0&&Kc.test(a);)a=a.replace(Kc,c),Kc.lastIndex=0,d-=1;return a}function L(a,b,c){_c[a]="function"==typeof b?b:function(a){return a&&c?c:b}}function M(a,b){return f(_c,a)?_c[a](b._strict,b._locale):new RegExp(N(a))}function N(a){return a.replace("\\","").replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(a,b,c,d,e){return b||c||d||e}).replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function O(a,b){var c,d=b;for("string"==typeof a&&(a=[a]),"number"==typeof 
 b&&(d=function(a,c){c[b]=p(a)}),c=0;c<a.length;c++)ad[a[c]]=d}function P(a,b){O(a,function(a,c,d,e){d._w=d._w||{},b(a,d._w,d,e)})}function Q(a,b,c){null!=b&&f(ad,a)&&ad[a](b,c._a,c,a)}function R(a,b){return new Date(Date.UTC(a,b+1,0)).getUTCDate()}function S(a){return this._months[a.month()]}function T(a){return this._monthsShort[a.month()]}function U(a,b,c){var d,e,f;for(this._monthsParse||(this._monthsParse=[],this._longMonthsParse=[],this._shortMonthsParse=[]),d=0;12>d;d++){if(e=h([2e3,d]),c&&!this._longMonthsParse[d]&&(this._longMonthsParse[d]=new RegExp("^"+this.months(e,"").replace(".","")+"$","i"),this._shortMonthsParse[d]=new RegExp("^"+this.monthsShort(e,"").replace(".","")+"$","i")),c||this._monthsParse[d]||(f="^"+this.months(e,"")+"|^"+this.monthsShort(e,""),this._monthsParse[d]=new RegExp(f.replace(".",""),"i")),c&&"MMMM"===b&&this._longMonthsParse[d].test(a))return d;if(c&&"MMM"===b&&this._shortMonthsParse[d].test(a))return d;if(!c&&this._monthsParse[d].test(a))return d
 }}function V(a,b){var c;return"string"==typeof b&&(b=a.localeData().monthsParse(b),"number"!=typeof b)?a:(c=Math.min(a.date(),R(a.year(),b)),a._d["set"+(a._isUTC?"UTC":"")+"Month"](b,c),a)}function W(b){return null!=b?(V(this,b),a.updateOffset(this,!0),this):C(this,"Month")}function X(){return R(this.year(),this.month())}function Y(a){var b,c=a._a;return c&&-2===j(a).overflow&&(b=c[cd]<0||c[cd]>11?cd:c[dd]<1||c[dd]>R(c[bd],c[cd])?dd:c[ed]<0||c[ed]>24||24===c[ed]&&(0!==c[fd]||0!==c[gd]||0!==c[hd])?ed:c[fd]<0||c[fd]>59?fd:c[gd]<0||c[gd]>59?gd:c[hd]<0||c[hd]>999?hd:-1,j(a)._overflowDayOfYear&&(bd>b||b>dd)&&(b=dd),j(a).overflow=b),a}function Z(b){a.suppressDeprecationWarnings===!1&&"undefined"!=typeof console&&console.warn&&console.warn("Deprecation warning: "+b)}function $(a,b){var c=!0,d=a+"\n"+(new Error).stack;return g(function(){return c&&(Z(d),c=!1),b.apply(this,arguments)},b)}function _(a,b){kd[a]||(Z(b),kd[a]=!0)}function aa(a){var b,c,d=a._i,e=ld.exec(d);if(e){for(j(a).iso=!0,b
 =0,c=md.length;c>b;b++)if(md[b][1].exec(d)){a._f=md[b][0]+(e[6]||" ");break}for(b=0,c=nd.length;c>b;b++)if(nd[b][1].exec(d)){a._f+=nd[b][0];break}d.match(Yc)&&(a._f+="Z"),ta(a)}else a._isValid=!1}function ba(b){var c=od.exec(b._i);return null!==c?void(b._d=new Date(+c[1])):(aa(b),void(b._isValid===!1&&(delete b._isValid,a.createFromInputFallback(b))))}function ca(a,b,c,d,e,f,g){var h=new Date(a,b,c,d,e,f,g);return 1970>a&&h.setFullYear(a),h}function da(a){var b=new Date(Date.UTC.apply(null,arguments));return 1970>a&&b.setUTCFullYear(a),b}function ea(a){return fa(a)?366:365}function fa(a){return a%4===0&&a%100!==0||a%400===0}function ga(){return fa(this.year())}function ha(a,b,c){var d,e=c-b,f=c-a.day();return f>e&&(f-=7),e-7>f&&(f+=7),d=Aa(a).add(f,"d"),{week:Math.ceil(d.dayOfYear()/7),year:d.year()}}function ia(a){return ha(a,this._week.dow,this._week.doy).week}function ja(){return this._week.dow}function ka(){return this._week.doy}function la(a){var b=this.localeData().week(this);
 return null==a?b:this.add(7*(a-b),"d")}function ma(a){var b=ha(this,1,4).week;return null==a?b:this.add(7*(a-b),"d")}function na(a,b,c,d,e){var f,g,h=da(a,0,1).getUTCDay();return h=0===h?7:h,c=null!=c?c:e,f=e-h+(h>d?7:0)-(e>h?7:0),g=7*(b-1)+(c-e)+f+1,{year:g>0?a:a-1,dayOfYear:g>0?g:ea(a-1)+g}}function oa(a){var b=Math.round((this.clone().startOf("day")-this.clone().startOf("year"))/864e5)+1;return null==a?b:this.add(a-b,"d")}function pa(a,b,c){return null!=a?a:null!=b?b:c}function qa(a){var b=new Date;return a._useUTC?[b.getUTCFullYear(),b.getUTCMonth(),b.getUTCDate()]:[b.getFullYear(),b.getMonth(),b.getDate()]}function ra(a){var b,c,d,e,f=[];if(!a._d){for(d=qa(a),a._w&&null==a._a[dd]&&null==a._a[cd]&&sa(a),a._dayOfYear&&(e=pa(a._a[bd],d[bd]),a._dayOfYear>ea(e)&&(j(a)._overflowDayOfYear=!0),c=da(e,0,a._dayOfYear),a._a[cd]=c.getUTCMonth(),a._a[dd]=c.getUTCDate()),b=0;3>b&&null==a._a[b];++b)a._a[b]=f[b]=d[b];for(;7>b;b++)a._a[b]=f[b]=null==a._a[b]?2===b?1:0:a._a[b];24===a._a[ed]&&0===
 a._a[fd]&&0===a._a[gd]&&0===a._a[hd]&&(a._nextDay=!0,a._a[ed]=0),a._d=(a._useUTC?da:ca).apply(null,f),null!=a._tzm&&a._d.setUTCMinutes(a._d.getUTCMinutes()-a._tzm),a._nextDay&&(a._a[ed]=24)}}function sa(a){var b,c,d,e,f,g,h;b=a._w,null!=b.GG||null!=b.W||null!=b.E?(f=1,g=4,c=pa(b.GG,a._a[bd],ha(Aa(),1,4).year),d=pa(b.W,1),e=pa(b.E,1)):(f=a._locale._week.dow,g=a._locale._week.doy,c=pa(b.gg,a._a[bd],ha(Aa(),f,g).year),d=pa(b.w,1),null!=b.d?(e=b.d,f>e&&++d):e=null!=b.e?b.e+f:f),h=na(c,d,e,g,f),a._a[bd]=h.year,a._dayOfYear=h.dayOfYear}function ta(b){if(b._f===a.ISO_8601)return void aa(b);b._a=[],j(b).empty=!0;var c,d,e,f,g,h=""+b._i,i=h.length,k=0;for(e=K(b._f,b._locale).match(Jc)||[],c=0;c<e.length;c++)f=e[c],d=(h.match(M(f,b))||[])[0],d&&(g=h.substr(0,h.indexOf(d)),g.length>0&&j(b).unusedInput.push(g),h=h.slice(h.indexOf(d)+d.length),k+=d.length),Mc[f]?(d?j(b).empty=!1:j(b).unusedTokens.push(f),Q(f,d,b)):b._strict&&!d&&j(b).unusedTokens.push(f);j(b).charsLeftOver=i-k,h.length>0&&j(b).u
 nusedInput.push(h),j(b).bigHour===!0&&b._a[ed]<=12&&b._a[ed]>0&&(j(b).bigHour=void 0),b._a[ed]=ua(b._locale,b._a[ed],b._meridiem),ra(b),Y(b)}function ua(a,b,c){var d;return null==c?b:null!=a.meridiemHour?a.meridiemHour(b,c):null!=a.isPM?(d=a.isPM(c),d&&12>b&&(b+=12),d||12!==b||(b=0),b):b}function va(a){var b,c,d,e,f;if(0===a._f.length)return j(a).invalidFormat=!0,void(a._d=new Date(0/0));for(e=0;e<a._f.length;e++)f=0,b=m({},a),null!=a._useUTC&&(b._useUTC=a._useUTC),b._f=a._f[e],ta(b),k(b)&&(f+=j(b).charsLeftOver,f+=10*j(b).unusedTokens.length,j(b).score=f,(null==d||d>f)&&(d=f,c=b));g(a,c||b)}function wa(a){if(!a._d){var b=A(a._i);a._a=[b.year,b.month,b.day||b.date,b.hour,b.minute,b.second,b.millisecond],ra(a)}}function xa(a){var b,e=a._i,f=a._f;return a._locale=a._locale||x(a._l),null===e||void 0===f&&""===e?l({nullInput:!0}):("string"==typeof e&&(a._i=e=a._locale.preparse(e)),o(e)?new n(Y(e)):(c(f)?va(a):f?ta(a):d(e)?a._d=e:ya(a),b=new n(Y(a)),b._nextDay&&(b.add(1,"d"),b._nextDay=v
 oid 0),b))}function ya(b){var f=b._i;void 0===f?b._d=new Date:d(f)?b._d=new Date(+f):"string"==typeof f?ba(b):c(f)?(b._a=e(f.slice(0),function(a){return parseInt(a,10)}),ra(b)):"object"==typeof f?wa(b):"number"==typeof f?b._d=new Date(f):a.createFromInputFallback(b)}function za(a,b,c,d,e){var f={};return"boolean"==typeof c&&(d=c,c=void 0),f._isAMomentObject=!0,f._useUTC=f._isUTC=e,f._l=c,f._i=a,f._f=b,f._strict=d,xa(f)}function Aa(a,b,c,d){return za(a,b,c,d,!1)}function Ba(a,b){var d,e;if(1===b.length&&c(b[0])&&(b=b[0]),!b.length)return Aa();for(d=b[0],e=1;e<b.length;++e)b[e][a](d)&&(d=b[e]);return d}function Ca(){var a=[].slice.call(arguments,0);return Ba("isBefore",a)}function Da(){var a=[].slice.call(arguments,0);return Ba("isAfter",a)}function Ea(a){var b=A(a),c=b.year||0,d=b.quarter||0,e=b.month||0,f=b.week||0,g=b.day||0,h=b.hour||0,i=b.minute||0,j=b.second||0,k=b.millisecond||0;this._milliseconds=+k+1e3*j+6e4*i+36e5*h,this._days=+g+7*f,this._months=+e+3*d+12*c,this._data={},th
 is._locale=x(),this._bubble()}function Fa(a){return a instanceof Ea}function Ga(a,b){G(a,0,0,function(){var a=this.utcOffset(),c="+";return 0>a&&(a=-a,c="-"),c+F(~~(a/60),2)+b+F(~~a%60,2)})}function Ha(a){var b=(a||"").match(Yc)||[],c=b[b.length-1]||[],d=(c+"").match(td)||["-",0,0],e=+(60*d[1])+p(d[2]);return"+"===d[0]?e:-e}function Ia(b,c){var e,f;return c._isUTC?(e=c.clone(),f=(o(b)||d(b)?+b:+Aa(b))-+e,e._d.setTime(+e._d+f),a.updateOffset(e,!1),e):Aa(b).local();return c._isUTC?Aa(b).zone(c._offset||0):Aa(b).local()}function Ja(a){return 15*-Math.round(a._d.getTimezoneOffset()/15)}function Ka(b,c){var d,e=this._offset||0;return null!=b?("string"==typeof b&&(b=Ha(b)),Math.abs(b)<16&&(b=60*b),!this._isUTC&&c&&(d=Ja(this)),this._offset=b,this._isUTC=!0,null!=d&&this.add(d,"m"),e!==b&&(!c||this._changeInProgress?$a(this,Va(b-e,"m"),1,!1):this._changeInProgress||(this._changeInProgress=!0,a.updateOffset(this,!0),this._changeInProgress=null)),this):this._isUTC?e:Ja(this)}function La(a,b)
 {return null!=a?("string"!=typeof a&&(a=-a),this.utcOffset(a,b),this):-this.utcOffset()}function Ma(a){return this.utcOffset(0,a)}function Na(a){return this._isUTC&&(this.utcOffset(0,a),this._isUTC=!1,a&&this.subtract(Ja(this),"m")),this}function Oa(){return this._tzm?this.utcOffset(this._tzm):"string"==typeof this._i&&this.utcOffset(Ha(this._i)),this}function Pa(a){return a=a?Aa(a).utcOffset():0,(this.utcOffset()-a)%60===0}function Qa(){return this.utcOffset()>this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()}function Ra(){if(this._a){var a=this._isUTC?h(this._a):Aa(this._a);return this.isValid()&&q(this._a,a.toArray())>0}return!1}function Sa(){return!this._isUTC}function Ta(){return this._isUTC}function Ua(){return this._isUTC&&0===this._offset}function Va(a,b){var c,d,e,g=a,h=null;return Fa(a)?g={ms:a._milliseconds,d:a._days,M:a._months}:"number"==typeof a?(g={},b?g[b]=a:g.milliseconds=a):(h=ud.exec(a))?(c="-"===h[1]?-1:1,g={y:0,d:p(h[dd])*c,h:
 p(h[ed])*c,m:p(h[fd])*c,s:p(h[gd])*c,ms:p(h[hd])*c}):(h=vd.exec(a))?(c="-"===h[1]?-1:1,g={y:Wa(h[2],c),M:Wa(h[3],c),d:Wa(h[4],c),h:Wa(h[5],c),m:Wa(h[6],c),s:Wa(h[7],c),w:Wa(h[8],c)}):null==g?g={}:"object"==typeof g&&("from"in g||"to"in g)&&(e=Ya(Aa(g.from),Aa(g.to)),g={},g.ms=e.milliseconds,g.M=e.months),d=new Ea(g),Fa(a)&&f(a,"_locale")&&(d._locale=a._locale),d}function Wa(a,b){var c=a&&parseFloat(a.replace(",","."));return(isNaN(c)?0:c)*b}function Xa(a,b){var c={milliseconds:0,months:0};return c.months=b.month()-a.month()+12*(b.year()-a.year()),a.clone().add(c.months,"M").isAfter(b)&&--c.months,c.milliseconds=+b-+a.clone().add(c.months,"M"),c}function Ya(a,b){var c;return b=Ia(b,a),a.isBefore(b)?c=Xa(a,b):(c=Xa(b,a),c.milliseconds=-c.milliseconds,c.months=-c.months),c}function Za(a,b){return function(c,d){var e,f;return null===d||isNaN(+d)||(_(b,"moment()."+b+"(period, number) is deprecated. Please use moment()."+b+"(number, period)."),f=c,c=d,d=f),c="string"==typeof c?+c:c,e=Va(c
 ,d),$a(this,e,a),this}}function $a(b,c,d,e){var f=c._milliseconds,g=c._days,h=c._months;e=null==e?!0:e,f&&b._d.setTime(+b._d+f*d),g&&D(b,"Date",C(b,"Date")+g*d),h&&V(b,C(b,"Month")+h*d),e&&a.updateOffset(b,g||h)}function _a(a){var b=a||Aa(),c=Ia(b,this).startOf("day"),d=this.diff(c,"days",!0),e=-6>d?"sameElse":-1>d?"lastWeek":0>d?"lastDay":1>d?"sameDay":2>d?"nextDay":7>d?"nextWeek":"sameElse";return this.format(this.localeData().calendar(e,this,Aa(b)))}function ab(){return new n(this)}function bb(a,b){var c;return b=z("undefined"!=typeof b?b:"millisecond"),"millisecond"===b?(a=o(a)?a:Aa(a),+this>+a):(c=o(a)?+a:+Aa(a),c<+this.clone().startOf(b))}function cb(a,b){var c;return b=z("undefined"!=typeof b?b:"millisecond"),"millisecond"===b?(a=o(a)?a:Aa(a),+a>+this):(c=o(a)?+a:+Aa(a),+this.clone().endOf(b)<c)}function db(a,b,c){return this.isAfter(a,c)&&this.isBefore(b,c)}function eb(a,b){var c;return b=z(b||"millisecond"),"millisecond"===b?(a=o(a)?a:Aa(a),+this===+a):(c=+Aa(a),+this.clone
 ().startOf(b)<=c&&c<=+this.clone().endOf(b))}function fb(a){return 0>a?Math.ceil(a):Math.floor(a)}function gb(a,b,c){var d,e,f=Ia(a,this),g=6e4*(f.utcOffset()-this.utcOffset());return b=z(b),"year"===b||"month"===b||"quarter"===b?(e=hb(this,f),"quarter"===b?e/=3:"year"===b&&(e/=12)):(d=this-f,e="second"===b?d/1e3:"minute"===b?d/6e4:"hour"===b?d/36e5:"day"===b?(d-g)/864e5:"week"===b?(d-g)/6048e5:d),c?e:fb(e)}function hb(a,b){var c,d,e=12*(b.year()-a.year())+(b.month()-a.month()),f=a.clone().add(e,"months");return 0>b-f?(c=a.clone().add(e-1,"months"),d=(b-f)/(f-c)):(c=a.clone().add(e+1,"months"),d=(b-f)/(c-f)),-(e+d)}function ib(){return this.clone().locale("en").format("ddd MMM DD YYYY HH:mm:ss [GMT]ZZ")}function jb(){var a=this.clone().utc();return 0<a.year()&&a.year()<=9999?"function"==typeof Date.prototype.toISOString?this.toDate().toISOString():J(a,"YYYY-MM-DD[T]HH:mm:ss.SSS[Z]"):J(a,"YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]")}function kb(b){var c=J(this,b||a.defaultFormat);return this.loc
 aleData().postformat(c)}function lb(a,b){return this.isValid()?Va({to:this,from:a}).locale(this.locale()).humanize(!b):this.localeData().invalidDate()}function mb(a){return this.from(Aa(),a)}function nb(a,b){return this.isValid()?Va({from:this,to:a}).locale(this.locale()).humanize(!b):this.localeData().invalidDate()}function ob(a){return this.to(Aa(),a)}function pb(a){var b;return void 0===a?this._locale._abbr:(b=x(a),null!=b&&(this._locale=b),this)}function qb(){return this._locale}function rb(a){switch(a=z(a)){case"year":this.month(0);case"quarter":case"month":this.date(1);case"week":case"isoWeek":case"day":this.hours(0);case"hour":this.minutes(0);case"minute":this.seconds(0);case"second":this.milliseconds(0)}return"week"===a&&this.weekday(0),"isoWeek"===a&&this.isoWeekday(1),"quarter"===a&&this.month(3*Math.floor(this.month()/3)),this}function sb(a){return a=z(a),void 0===a||"millisecond"===a?this:this.startOf(a).add(1,"isoWeek"===a?"week":a).subtract(1,"ms")}function tb(){return
 +this._d-6e4*(this._offset||0)}function ub(){return Math.floor(+this/1e3)}function vb(){return this._offset?new Date(+this):this._d}function wb(){var a=this;return[a.year(),a.month(),a.date(),a.hour(),a.minute(),a.second(),a.millisecond()]}function xb(){return k(this)}function yb(){return g({},j(this))}function zb(){return j(this).overflow}function Ab(a,b){G(0,[a,a.length],0,b)}function Bb(a,b,c){return ha(Aa([a,11,31+b-c]),b,c).week}function Cb(a){var b=ha(this,this.localeData()._week.dow,this.localeData()._week.doy).year;return null==a?b:this.add(a-b,"y")}function Db(a){var b=ha(this,1,4).year;return null==a?b:this.add(a-b,"y")}function Eb(){return Bb(this.year(),1,4)}function Fb(){var a=this.localeData()._week;return Bb(this.year(),a.dow,a.doy)}function Gb(a){return null==a?Math.ceil((this.month()+1)/3):this.month(3*(a-1)+this.month()%3)}function Hb(a,b){if("string"==typeof a)if(isNaN(a)){if(a=b.weekdaysParse(a),"number"!=typeof a)return null}else a=parseInt(a,10);return a}functi
 on Ib(a){return this._weekdays[a.day()]}function Jb(a){return this._weekdaysShort[a.day()]}function Kb(a){return this._weekdaysMin[a.day()]}function Lb(a){var b,c,d;for(this._weekdaysParse||(this._weekdaysParse=[]),b=0;7>b;b++)if(this._weekdaysParse[b]||(c=Aa([2e3,1]).day(b),d="^"+this.weekdays(c,"")+"|^"+this.weekdaysShort(c,"")+"|^"+this.weekdaysMin(c,""),this._weekdaysParse[b]=new RegExp(d.replace(".",""),"i")),this._weekdaysParse[b].test(a))return b}function Mb(a){var b=this._isUTC?this._d.getUTCDay():this._d.getDay();return null!=a?(a=Hb(a,this.localeData()),this.add(a-b,"d")):b}function Nb(a){var b=(this.day()+7-this.localeData()._week.dow)%7;return null==a?b:this.add(a-b,"d")}function Ob(a){return null==a?this.day()||7:this.day(this.day()%7?a:a-7)}function Pb(a,b){G(a,0,0,function(){return this.localeData().meridiem(this.hours(),this.minutes(),b)})}function Qb(a,b){return b._meridiemParse}function Rb(a){return"p"===(a+"").toLowerCase().charAt(0)}function Sb(a,b,c){return a>11
 ?c?"pm":"PM":c?"am":"AM"}function Tb(a){G(0,[a,3],0,"millisecond")}function Ub(){return this._isUTC?"UTC":""}function Vb(){return this._isUTC?"Coordinated Universal Time":""}function Wb(a){return Aa(1e3*a)}function Xb(){return Aa.apply(null,arguments).parseZone()}function Yb(a,b,c){var d=this._calendar[a];return"function"==typeof d?d.call(b,c):d}function Zb(a){var b=this._longDateFormat[a];return!b&&this._longDateFormat[a.toUpperCase()]&&(b=this._longDateFormat[a.toUpperCase()].replace(/MMMM|MM|DD|dddd/g,function(a){return a.slice(1)}),this._longDateFormat[a]=b),b}function $b(){return this._invalidDate}function _b(a){return this._ordinal.replace("%d",a)}function ac(a){return a}function bc(a,b,c,d){var e=this._relativeTime[c];return"function"==typeof e?e(a,b,c,d):e.replace(/%d/i,a)}function cc(a,b){var c=this._relativeTime[a>0?"future":"past"];return"function"==typeof c?c(b):c.replace(/%s/i,b)}function dc(a){var b,c;for(c in a)b=a[c],"function"==typeof b?this[c]=b:this["_"+c]=b;this.
 _ordinalParseLenient=new RegExp(this._ordinalParse.source+"|"+/\d{1,2}/.source)}function ec(a,b,c,d){var e=x(),f=h().set(d,b);return e[c](f,a)}function fc(a,b,c,d,e){if("number"==typeof a&&(b=a,a=void 0),a=a||"",null!=b)return ec(a,b,c,e);var f,g=[];for(f=0;d>f;f++)g[f]=ec(a,f,c,e);return g}function gc(a,b){return fc(a,b,"months",12,"month")}function hc(a,b){return fc(a,b,"monthsShort",12,"month")}function ic(a,b){return fc(a,b,"weekdays",7,"day")}function jc(a,b){return fc(a,b,"weekdaysShort",7,"day")}function kc(a,b){return fc(a,b,"weekdaysMin",7,"day")}function lc(){var a=this._data;return this._milliseconds=Rd(this._milliseconds),this._days=Rd(this._days),this._months=Rd(this._months),a.milliseconds=Rd(a.milliseconds),a.seconds=Rd(a.seconds),a.minutes=Rd(a.minutes),a.hours=Rd(a.hours),a.months=Rd(a.months),a.years=Rd(a.years),this}function mc(a,b,c,d){var e=Va(b,c);return a._milliseconds+=d*e._milliseconds,a._days+=d*e._days,a._months+=d*e._months,a._bubble()}function nc(a,b){re
 turn mc(this,a,b,1)}function oc(a,b){return mc(this,a,b,-1)}function pc(){var a,b,c,d=this._milliseconds,e=this._days,f=this._months,g=this._data,h=0;return g.milliseconds=d%1e3,a=fb(d/1e3),g.seconds=a%60,b=fb(a/60),g.minutes=b%60,c=fb(b/60),g.hours=c%24,e+=fb(c/24),h=fb(qc(e)),e-=fb(rc(h)),f+=fb(e/30),e%=30,h+=fb(f/12),f%=12,g.days=e,g.months=f,g.years=h,this}function qc(a){return 400*a/146097}function rc(a){return 146097*a/400}function sc(a){var b,c,d=this._milliseconds;if(a=z(a),"month"===a||"year"===a)return b=this._days+d/864e5,c=this._months+12*qc(b),"month"===a?c:c/12;switch(b=this._days+Math.round(rc(this._months/12)),a){case"week":return b/7+d/6048e5;case"day":return b+d/864e5;case"hour":return 24*b+d/36e5;case"minute":return 1440*b+d/6e4;case"second":return 86400*b+d/1e3;case"millisecond":return Math.floor(864e5*b)+d;default:throw new Error("Unknown unit "+a)}}function tc(){return this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*p(this._months/12)}functio
 n uc(a){return function(){return this.as(a)}}function vc(a){return a=z(a),this[a+"s"]()}function wc(a){return function(){return this._data[a]}}function xc(){return fb(this.days()/7)}function yc(a,b,c,d,e){return e.relativeTime(b||1,!!c,a,d)}function zc(a,b,c){var d=Va(a).abs(),e=fe(d.as("s")),f=fe(d.as("m")),g=fe(d.as("h")),h=fe(d.as("d")),i=fe(d.as("M")),j=fe(d.as("y")),k=e<ge.s&&["s",e]||1===f&&["m"]||f<ge.m&&["mm",f]||1===g&&["h"]||g<ge.h&&["hh",g]||1===h&&["d"]||h<ge.d&&["dd",h]||1===i&&["M"]||i<ge.M&&["MM",i]||1===j&&["y"]||["yy",j];return k[2]=b,k[3]=+a>0,k[4]=c,yc.apply(null,k)}function Ac(a,b){return void 0===ge[a]?!1:void 0===b?ge[a]:(ge[a]=b,!0)}function Bc(a){var b=this.localeData(),c=zc(this,!a,b);return a&&(c=b.pastFuture(+this,c)),b.postformat(c)}function Cc(){var a=he(this.years()),b=he(this.months()),c=he(this.days()),d=he(this.hours()),e=he(this.minutes()),f=he(this.seconds()+this.milliseconds()/1e3),g=this.asSeconds();return g?(0>g?"-":"")+"P"+(a?a+"Y":"")+(b?b+"M"
 :"")+(c?c+"D":"")+(d||e||f?"T":"")+(d?d+"H":"")+(e?e+"M":"")+(f?f+"S":""):"P0D"}var Dc,Ec,Fc=a.momentProperties=[],Gc=!1,Hc={},Ic={},Jc=/(\[[^\[]*\])|(\\)?(Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Q|YYYYYY|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|mm?|ss?|S{1,4}|x|X|zz?|ZZ?|.)/g,Kc=/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g,Lc={},Mc={},Nc=/\d/,Oc=/\d\d/,Pc=/\d{3}/,Qc=/\d{4}/,Rc=/[+-]?\d{6}/,Sc=/\d\d?/,Tc=/\d{1,3}/,Uc=/\d{1,4}/,Vc=/[+-]?\d{1,6}/,Wc=/\d+/,Xc=/[+-]?\d+/,Yc=/Z|[+-]\d\d:?\d\d/gi,Zc=/[+-]?\d+(\.\d{1,3})?/,$c=/[0-9]*['a-z\u00A0-\u05FF\u0700-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]+|[\u0600-\u06FF\/]+(\s*?[\u0600-\u06FF]+){1,2}/i,_c={},ad={},bd=0,cd=1,dd=2,ed=3,fd=4,gd=5,hd=6;G("M",["MM",2],"Mo",function(){return this.month()+1}),G("MMM",0,0,function(a){return this.localeData().monthsShort(this,a)}),G("MMMM",0,0,function(a){return this.localeData().months(this,a)}),y("month","M"),L("M",Sc),L("MM",Sc,Oc),L("MMM",$c),L("MMMM",$c),O(["M","MM"],function(a,b){b[c
 d]=p(a)-1}),O(["MMM","MMMM"],function(a,b,c,d){var e=c._locale.monthsParse(a,d,c._strict);null!=e?b[cd]=e:j(c).invalidMonth=a});var id="January_February_March_April_May_June_July_August_September_October_November_December".split("_"),jd="Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),kd={};a.suppressDeprecationWarnings=!1;var ld=/^\s*(?:[+-]\d{6}|\d{4})-(?:(\d\d-\d\d)|(W\d\d$)|(W\d\d-\d)|(\d\d\d))((T| )(\d\d(:\d\d(:\d\d(\.\d+)?)?)?)?([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,md=[["YYYYYY-MM-DD",/[+-]\d{6}-\d{2}-\d{2}/],["YYYY-MM-DD",/\d{4}-\d{2}-\d{2}/],["GGGG-[W]WW-E",/\d{4}-W\d{2}-\d/],["GGGG-[W]WW",/\d{4}-W\d{2}/],["YYYY-DDD",/\d{4}-\d{3}/]],nd=[["HH:mm:ss.SSSS",/(T| )\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss",/(T| )\d\d:\d\d:\d\d/],["HH:mm",/(T| )\d\d:\d\d/],["HH",/(T| )\d\d/]],od=/^\/?Date\((\-?\d+)/i;a.createFromInputFallback=$("moment construction falls back to js Date. This is discouraged and will be removed in upcoming major release. Please refer to https://github.com/moment/mom
 ent/issues/1407 for more info.",function(a){a._d=new Date(a._i+(a._useUTC?" UTC":""))}),G(0,["YY",2],0,function(){return this.year()%100}),G(0,["YYYY",4],0,"year"),G(0,["YYYYY",5],0,"year"),G(0,["YYYYYY",6,!0],0,"year"),y("year","y"),L("Y",Xc),L("YY",Sc,Oc),L("YYYY",Uc,Qc),L("YYYYY",Vc,Rc),L("YYYYYY",Vc,Rc),O(["YYYY","YYYYY","YYYYYY"],bd),O("YY",function(b,c){c[bd]=a.parseTwoDigitYear(b)}),a.parseTwoDigitYear=function(a){return p(a)+(p(a)>68?1900:2e3)};var pd=B("FullYear",!1);G("w",["ww",2],"wo","week"),G("W",["WW",2],"Wo","isoWeek"),y("week","w"),y("isoWeek","W"),L("w",Sc),L("ww",Sc,Oc),L("W",Sc),L("WW",Sc,Oc),P(["w","ww","W","WW"],function(a,b,c,d){b[d.substr(0,1)]=p(a)});var qd={dow:0,doy:6};G("DDD",["DDDD",3],"DDDo","dayOfYear"),y("dayOfYear","DDD"),L("DDD",Tc),L("DDDD",Pc),O(["DDD","DDDD"],function(a,b,c){c._dayOfYear=p(a)}),a.ISO_8601=function(){};var rd=$("moment().min is deprecated, use moment.min instead. https://github.com/moment/moment/issues/1548",function(){var a=Aa.app
 ly(null,arguments);return this>a?this:a}),sd=$("moment().max is deprecated, use moment.max instead. https://github.com/moment/moment/issues/1548",function(){var a=Aa.apply(null,arguments);return a>this?this:a});Ga("Z",":"),Ga("ZZ",""),L("Z",Yc),L("ZZ",Yc),O(["Z","ZZ"],function(a,b,c){c._useUTC=!0,c._tzm=Ha(a)});var td=/([\+\-]|\d\d)/gi;a.updateOffset=function(){};var ud=/(\-)?(?:(\d*)\.)?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?)?/,vd=/^(-)?P(?:(?:([0-9,.]*)Y)?(?:([0-9,.]*)M)?(?:([0-9,.]*)D)?(?:T(?:([0-9,.]*)H)?(?:([0-9,.]*)M)?(?:([0-9,.]*)S)?)?|([0-9,.]*)W)$/;Va.fn=Ea.prototype;var wd=Za(1,"add"),xd=Za(-1,"subtract");a.defaultFormat="YYYY-MM-DDTHH:mm:ssZ";var yd=$("moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.",function(a){return void 0===a?this.localeData():this.locale(a)});G(0,["gg",2],0,function(){return this.weekYear()%100}),G(0,["GG",2],0,function(){return this.isoWeekYear()%100}),Ab("ggg
 g","weekYear"),Ab("ggggg","weekYear"),Ab("GGGG","isoWeekYear"),Ab("GGGGG","isoWeekYear"),y("weekYear","gg"),y("isoWeekYear","GG"),L("G",Xc),L("g",Xc),L("GG",Sc,Oc),L("gg",Sc,Oc),L("GGGG",Uc,Qc),L("gggg",Uc,Qc),L("GGGGG",Vc,Rc),L("ggggg",Vc,Rc),P(["gggg","ggggg","GGGG","GGGGG"],function(a,b,c,d){b[d.substr(0,2)]=p(a)}),P(["gg","GG"],function(b,c,d,e){c[e]=a.parseTwoDigitYear(b)}),G("Q",0,0,"quarter"),y("quarter","Q"),L("Q",Nc),O("Q",function(a,b){b[cd]=3*(p(a)-1)}),G("D",["DD",2],"Do","date"),y("date","D"),L("D",Sc),L("DD",Sc,Oc),L("Do",function(a,b){return a?b._ordinalParse:b._ordinalParseLenient}),O(["D","DD"],dd),O("Do",function(a,b){b[dd]=p(a.match(Sc)[0],10)});var zd=B("Date",!0);G("d",0,"do","day"),G("dd",0,0,function(a){return this.localeData().weekdaysMin(this,a)}),G("ddd",0,0,function(a){return this.localeData().weekdaysShort(this,a)}),G("dddd",0,0,function(a){return this.localeData().weekdays(this,a)}),G("e",0,0,"weekday"),G("E",0,0,"isoWeekday"),y("day","d"),y("weekday","e
 "),y("isoWeekday","E"),L("d",Sc),L("e",Sc),L("E",Sc),L("dd",$c),L("ddd",$c),L("dddd",$c),P(["dd","ddd","dddd"],function(a,b,c){var d=c._locale.weekdaysParse(a);null!=d?b.d=d:j(c).invalidWeekday=a}),P(["d","e","E"],function(a,b,c,d){b[d]=p(a)});var Ad="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),Bd="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),Cd="Su_Mo_Tu_We_Th_Fr_Sa".split("_");G("H",["HH",2],0,"hour"),G("h",["hh",2],0,function(){return this.hours()%12||12}),Pb("a",!0),Pb("A",!1),y("hour","h"),L("a",Qb),L("A",Qb),L("H",Sc),L("h",Sc),L("HH",Sc,Oc),L("hh",Sc,Oc),O(["H","HH"],ed),O(["a","A"],function(a,b,c){c._isPm=c._locale.isPM(a),c._meridiem=a}),O(["h","hh"],function(a,b,c){b[ed]=p(a),j(c).bigHour=!0});var Dd=/[ap]\.?m?\.?/i,Ed=B("Hours",!0);G("m",["mm",2],0,"minute"),y("minute","m"),L("m",Sc),L("mm",Sc,Oc),O(["m","mm"],fd);var Fd=B("Minutes",!1);G("s",["ss",2],0,"second"),y("second","s"),L("s",Sc),L("ss",Sc,Oc),O(["s","ss"],gd);var Gd=B("Seconds",!1);G("S",0,0,
 function(){return~~(this.millisecond()/100)}),G(0,["SS",2],0,function(){return~~(this.millisecond()/10)}),Tb("SSS"),Tb("SSSS"),y("millisecond","ms"),L("S",Tc,Nc),L("SS",Tc,Oc),L("SSS",Tc,Pc),L("SSSS",Wc),O(["S","SS","SSS","SSSS"],function(a,b){b[hd]=p(1e3*("0."+a))});var Hd=B("Milliseconds",!1);G("z",0,0,"zoneAbbr"),G("zz",0,0,"zoneName");var Id=n.prototype;Id.add=wd,Id.calendar=_a,Id.clone=ab,Id.diff=gb,Id.endOf=sb,Id.format=kb,Id.from=lb,Id.fromNow=mb,Id.to=nb,Id.toNow=ob,Id.get=E,Id.invalidAt=zb,Id.isAfter=bb,Id.isBefore=cb,Id.isBetween=db,Id.isSame=eb,Id.isValid=xb,Id.lang=yd,Id.locale=pb,Id.localeData=qb,Id.max=sd,Id.min=rd,Id.parsingFlags=yb,Id.set=E,Id.startOf=rb,Id.subtract=xd,Id.toArray=wb,Id.toDate=vb,Id.toISOString=jb,Id.toJSON=jb,Id.toString=ib,Id.unix=ub,Id.valueOf=tb,Id.year=pd,Id.isLeapYear=ga,Id.weekYear=Cb,Id.isoWeekYear=Db,Id.quarter=Id.quarters=Gb,Id.month=W,Id.daysInMonth=X,Id.week=Id.weeks=la,Id.isoWeek=Id.isoWeeks=ma,Id.weeksInYear=Fb,Id.isoWeeksInYear=Eb,Id.da
 te=zd,Id.day=Id.days=Mb,Id.weekday=Nb,Id.isoWeekday=Ob,Id.dayOfYear=oa,Id.hour=Id.hours=Ed,Id.minute=Id.minutes=Fd,Id.second=Id.seconds=Gd,Id.millisecond=Id.milliseconds=Hd,Id.utcOffset=Ka,Id.utc=Ma,Id.local=Na,Id.parseZone=Oa,Id.hasAlignedHourOffset=Pa,Id.isDST=Qa,Id.isDSTShifted=Ra,Id.isLocal=Sa,Id.isUtcOffset=Ta,Id.isUtc=Ua,Id.isUTC=Ua,Id.zoneAbbr=Ub,Id.zoneName=Vb,Id.dates=$("dates accessor is deprecated. Use date instead.",zd),Id.months=$("months accessor is deprecated. Use month instead",W),Id.years=$("years accessor is deprecated. Use year instead",pd),Id.zone=$("moment().zone is deprecated, use moment().utcOffset instead. https://github.com/moment/moment/issues/1779",La);var Jd=Id,Kd={sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},Ld={LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY LT",LLLL:"dddd, MMMM D, YYYY LT"},Md="Invalid date",Nd="%d",Od=/
 \d{1,2}/,Pd={future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",
+hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},Qd=r.prototype;Qd._calendar=Kd,Qd.calendar=Yb,Qd._longDateFormat=Ld,Qd.longDateFormat=Zb,Qd._invalidDate=Md,Qd.invalidDate=$b,Qd._ordinal=Nd,Qd.ordinal=_b,Qd._ordinalParse=Od,Qd.preparse=ac,Qd.postformat=ac,Qd._relativeTime=Pd,Qd.relativeTime=bc,Qd.pastFuture=cc,Qd.set=dc,Qd.months=S,Qd._months=id,Qd.monthsShort=T,Qd._monthsShort=jd,Qd.monthsParse=U,Qd.week=ia,Qd._week=qd,Qd.firstDayOfYear=ka,Qd.firstDayOfWeek=ja,Qd.weekdays=Ib,Qd._weekdays=Ad,Qd.weekdaysMin=Kb,Qd._weekdaysMin=Cd,Qd.weekdaysShort=Jb,Qd._weekdaysShort=Bd,Qd.weekdaysParse=Lb,Qd.isPM=Rb,Qd._meridiemParse=Dd,Qd.meridiem=Sb,v("en",{ordinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(a){var b=a%10,c=1===p(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c}}),a.lang=$("moment.lang is deprecated. Use moment.locale instead.",v),a.langData=$("moment.langData is deprecated. Use moment.localeData instead.",x);var Rd=Math.abs,
 Sd=uc("ms"),Td=uc("s"),Ud=uc("m"),Vd=uc("h"),Wd=uc("d"),Xd=uc("w"),Yd=uc("M"),Zd=uc("y"),$d=wc("milliseconds"),_d=wc("seconds"),ae=wc("minutes"),be=wc("hours"),ce=wc("days"),de=wc("months"),ee=wc("years"),fe=Math.round,ge={s:45,m:45,h:22,d:26,M:11},he=Math.abs,ie=Ea.prototype;ie.abs=lc,ie.add=nc,ie.subtract=oc,ie.as=sc,ie.asMilliseconds=Sd,ie.asSeconds=Td,ie.asMinutes=Ud,ie.asHours=Vd,ie.asDays=Wd,ie.asWeeks=Xd,ie.asMonths=Yd,ie.asYears=Zd,ie.valueOf=tc,ie._bubble=pc,ie.get=vc,ie.milliseconds=$d,ie.seconds=_d,ie.minutes=ae,ie.hours=be,ie.days=ce,ie.weeks=xc,ie.months=de,ie.years=ee,ie.humanize=Bc,ie.toISOString=Cc,ie.toString=Cc,ie.toJSON=Cc,ie.locale=pb,ie.localeData=qb,ie.toIsoString=$("toIsoString() is deprecated. Please use toISOString() instead (notice the capitals)",Cc),ie.lang=yd,G("X",0,0,"unix"),G("x",0,0,"valueOf"),L("x",Xc),L("X",Zc),O("X",function(a,b,c){c._d=new Date(1e3*parseFloat(a,10))}),O("x",function(a,b,c){c._d=new Date(p(a))}),a.version="2.10.3",b(Aa),a.fn=Jd,a.m
 in=Ca,a.max=Da,a.utc=h,a.unix=Wb,a.months=gc,a.isDate=d,a.locale=v,a.invalid=l,a.duration=Va,a.isMoment=o,a.weekdays=ic,a.parseZone=Xb,a.localeData=x,a.isDuration=Fa,a.monthsShort=hc,a.weekdaysMin=kc,a.defineLocale=w,a.weekdaysShort=jc,a.normalizeUnits=z,a.relativeTimeThreshold=Ac;var je=a;return je});
\ No newline at end of file