Posted to common-commits@hadoop.apache.org by xg...@apache.org on 2017/07/31 16:00:33 UTC

[01/50] [abbrv] hadoop git commit: YARN-6779. DominantResourceFairnessPolicy.DominantResourceFairnessComparator.calculateShares() should be @VisibleForTesting (Contributed by Yeliang Cang via Daniel Templeton) [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 4cce82220 -> 7915ee3da (forced update)


YARN-6779. DominantResourceFairnessPolicy.DominantResourceFairnessComparator.calculateShares() should be @VisibleForTesting
(Contributed by Yeliang Cang via Daniel Templeton)
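
For context, a minimal sketch of the pattern this patch applies: Guava's
@VisibleForTesting marks a member whose visibility is wider than it would
otherwise be, solely so tests can reach it. The class and method below are
illustrative, not the Hadoop code.

    import com.google.common.annotations.VisibleForTesting;

    public class ShareExample {
      // Package-private instead of private only so a test in the same
      // package can call it directly; the annotation documents that intent.
      @VisibleForTesting
      float share(long used, long capacity) {
        return (float) used / capacity;
      }
    }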


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb30bd37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb30bd37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb30bd37

Branch: refs/heads/YARN-5734
Commit: bb30bd3771442df253cbe55c448379580bd5ad07
Parents: 4c40cd4
Author: Daniel Templeton <te...@apache.org>
Authored: Mon Jul 24 12:13:50 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Mon Jul 24 12:13:50 2017 -0700

----------------------------------------------------------------------
 .../scheduler/fair/policies/DominantResourceFairnessPolicy.java    | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb30bd37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index 193ed4d..72377b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
 import java.util.Collection;
 import java.util.Comparator;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -174,6 +175,7 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy {
      * by largest share.  So if resource=<10 MB, 5 CPU>, and pool=<100 MB, 10 CPU>,
      * shares will be [.1, .5] and resourceOrder will be [CPU, MEMORY].
      */
+    @VisibleForTesting
     void calculateShares(Resource resource, Resource pool,
         ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights) {
       shares.setWeight(MEMORY, (float)resource.getMemorySize() /
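
A rough numeric walk-through of what calculateShares computes, using the
figures from the javadoc above (a standalone sketch, not the Hadoop
implementation):

    public class DominantShareExample {
      public static void main(String[] args) {
        // resource=<10 MB, 5 CPU>, pool=<100 MB, 10 CPU>
        float memShare = 10f / 100f; // 0.1
        float cpuShare = 5f / 10f;   // 0.5
        // Sorting by descending share gives resourceOrder = [CPU, MEMORY],
        // so CPU is the dominant resource for this comparison.
        System.out.println("shares = [" + memShare + ", " + cpuShare + "]");
      }
    }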




[25/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
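
The motivation, briefly: JDK 9 reserves a lone underscore as a keyword, so
the original Hamlet API, which used _ both as an interface name and as the
element-closing method, no longer compiles there. The hamlet2 copy renames
both to __. A hedged before/after sketch (the p() chain is illustrative):

    // hamlet (rejected by javac on JDK 9+): '_' closes the element
    //   html.p("#id")._("hello")._();
    // hamlet2 (this patch): the same chain spelled with '__'
    //   html.p("#id").__("hello").__();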
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java
new file mode 100644
index 0000000..c6ca93c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java
@@ -0,0 +1,449 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet2;
+
+import com.google.common.collect.Sets;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Generates a specific hamlet implementation class from a spec class
+ * using a generic hamlet implementation class.
+ */
+@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
+public class HamletGen {
+  static final Logger LOG = LoggerFactory.getLogger(HamletGen.class);
+  static final Options opts = new Options();
+  static {
+    opts.addOption("h", "help", false, "Print this help message").
+         addOption("s", "spec-class", true,
+                   "The class that holds the spec interfaces. e.g. HamletSpec").
+         addOption("i", "impl-class", true,
+                   "An implementation class. e.g. HamletImpl").
+         addOption("o", "output-class", true, "Output class name").
+         addOption("p", "output-package", true, "Output package name");
+  };
+
+  static final Pattern elementRegex = Pattern.compile("^[A-Z][A-Z0-9]*$");
+
+  int bytes = 0;
+  PrintWriter out;
+  final Set<String> endTagOptional = Sets.newHashSet();
+  final Set<String> inlineElements = Sets.newHashSet();
+  Class<?> top; // html top-level interface
+  String hamlet; // output class simple name;
+  boolean topMode;
+
+  /**
+   * Generate a specific Hamlet implementation from a spec.
+   * @param specClass holds hamlet interfaces. e.g. {@link HamletSpec}
+   * @param implClass a generic hamlet implementation. e.g. {@link HamletImpl}
+   * @param outputName name of the output class. e.g. {@link Hamlet}
+   * @param outputPkg package name of the output class.
+   * @throws IOException
+   */
+  public void generate(Class<?> specClass, Class<?> implClass,
+                       String outputName, String outputPkg) throws IOException {
+    LOG.info("Generating {} using {} and {}", new Object[]{outputName,
+             specClass, implClass});
+    out = new PrintWriter(outputName +".java", "UTF-8");
+    hamlet = basename(outputName);
+    String pkg = pkgName(outputPkg, implClass.getPackage().getName());
+    puts(0, "// Generated by HamletGen. Do NOT edit!\n",
+         "package ", pkg, ";\n",
+         "import java.io.PrintWriter;\n",
+         "import java.util.EnumSet;\n",
+         "import static java.util.EnumSet.*;\n",
+         "import static ", implClass.getName(), ".EOpt.*;\n",
+         "import org.apache.hadoop.yarn.webapp.SubView;");
+    String implClassName = implClass.getSimpleName();
+    if (!implClass.getPackage().getName().equals(pkg)) {
+      puts(0, "import ", implClass.getName(), ';');
+    }
+    puts(0, "\n",
+         "public class ", hamlet, " extends ", implClassName,
+         " implements ", specClass.getSimpleName(), "._Html {\n",
+         "  public ", hamlet, "(PrintWriter out, int nestLevel,",
+         " boolean wasInline) {\n",
+         "    super(out, nestLevel, wasInline);\n",
+         "  }\n\n", // inline is context sensitive
+         "  static EnumSet<EOpt> opt(boolean endTag, boolean inline, ",
+         "boolean pre) {\n",
+         "    EnumSet<EOpt> opts = of(ENDTAG);\n",
+         "    if (!endTag) opts.remove(ENDTAG);\n",
+         "    if (inline) opts.add(INLINE);\n",
+         "    if (pre) opts.add(PRE);\n",
+         "    return opts;\n",
+         "  }");
+    initLut(specClass);
+    genImpl(specClass, implClassName, 1);
+    LOG.info("Generating {} methods", hamlet);
+    genMethods(hamlet, top, 1);
+    puts(0, "}");
+    out.close();
+    LOG.info("Wrote {} bytes to {}.java", bytes, outputName);
+  }
+
+  String basename(String path) {
+    return path.substring(path.lastIndexOf('/') + 1);
+  }
+
+  String pkgName(String pkg, String defaultPkg) {
+    if (pkg == null || pkg.isEmpty()) return defaultPkg;
+    return pkg;
+  }
+
+  void initLut(Class<?> spec) {
+    endTagOptional.clear();
+    inlineElements.clear();
+    for (Class<?> cls : spec.getClasses()) {
+      Annotation a = cls.getAnnotation(HamletSpec.Element.class);
+      if (a != null && !((HamletSpec.Element) a).endTag()) {
+        endTagOptional.add(cls.getSimpleName());
+      }
+      if (cls.getSimpleName().equals("Inline")) {
+        for (Method method : cls.getMethods()) {
+          String retName = method.getReturnType().getSimpleName();
+          if (isElement(retName)) {
+            inlineElements.add(retName);
+          }
+        }
+      }
+    }
+  }
+
+  void genImpl(Class<?> spec, String implClassName, int indent) {
+    String specName = spec.getSimpleName();
+    for (Class<?> cls : spec.getClasses()) {
+      String className = cls.getSimpleName();
+      if (cls.isInterface()) {
+        genFactoryMethods(cls, indent);
+      }
+      if (isElement(className)) {
+        LOG.info("Generating class {}<T>", className);
+        puts(indent, "\n",
+             "public class ", className, "<T extends __>",
+             " extends EImp<T> implements ", specName, ".", className, " {\n",
+             "  public ", className, "(String name, T parent,",
+             " EnumSet<EOpt> opts) {\n",
+             "    super(name, parent, opts);\n",
+             "  }");
+        genMethods(className, cls, indent + 1);
+        puts(indent, "}");
+      } else if (className.equals("_Html")) {
+        top = cls;
+      }
+    }
+  }
+
+  void genFactoryMethods(Class<?> cls, int indent) {
+    for (Method method : cls.getDeclaredMethods()) {
+      String retName = method.getReturnType().getSimpleName();
+      String methodName = method.getName();
+      if (methodName.charAt(0) == '$') continue;
+      if (isElement(retName) && method.getParameterTypes().length == 0) {
+        genFactoryMethod(retName, methodName, indent);
+      }
+    }
+  }
+
+  void genMethods(String className, Class<?> cls, int indent) {
+    topMode = (top != null && cls.equals(top));
+    for (Method method : cls.getMethods()) {
+      String retName = method.getReturnType().getSimpleName();
+      if (method.getName().charAt(0) == '$') {
+        genAttributeMethod(className, method, indent);
+      } else if (isElement(retName)) {
+        genNewElementMethod(className, method, indent);
+      } else {
+        genCurElementMethod(className, method, indent);
+      }
+    }
+  }
+
+  void genAttributeMethod(String className, Method method, int indent) {
+    String methodName = method.getName();
+    String attrName = methodName.substring(1).replace("__", "-");
+    Type[] params = method.getGenericParameterTypes();
+    echo(indent, "\n",
+         "@Override\n",
+         "public ", className, topMode ? " " : "<T> ", methodName, "(");
+    if (params.length == 0) {
+      puts(0, ") {");
+      puts(indent,
+           "  addAttr(\"", attrName, "\", null);\n",
+           "  return this;\n", "}");
+    } else if (params.length == 1) {
+      String typeName = getTypeName(params[0]);
+      puts(0, typeName, " value) {");
+      if (typeName.equals("EnumSet<LinkType>")) {
+        puts(indent,
+             "  addRelAttr(\"", attrName, "\", value);\n",
+             "  return this;\n", "}");
+      } else if (typeName.equals("EnumSet<Media>")) {
+        puts(indent,
+             "  addMediaAttr(\"", attrName, "\", value);\n",
+             "  return this;\n", "}");
+      } else {
+        puts(indent,
+             "  addAttr(\"", attrName, "\", value);\n",
+             "  return this;\n", "}");
+      }
+    } else {
+      throwUnhandled(className, method);
+    }
+  }
+
+  String getTypeName(Type type) {
+    if (type instanceof Class<?>) {
+      return ((Class<?>)type).getSimpleName();
+    }
+    ParameterizedType pt = (ParameterizedType) type;
+    return ((Class<?>)pt.getRawType()).getSimpleName() +"<"+
+        ((Class<?>)pt.getActualTypeArguments()[0]).getSimpleName() +">";
+  }
+
+  void genFactoryMethod(String retName, String methodName, int indent) {
+    puts(indent, "\n",
+         "private <T extends __> ", retName, "<T> ", methodName,
+         "__(T e, boolean inline) {\n",
+         "  return new ", retName, "<T>(\"", StringUtils.toLowerCase(retName),
+         "\", e, opt(", !endTagOptional.contains(retName), ", inline, ",
+         retName.equals("PRE"), ")); }");
+  }
+
+  void genNewElementMethod(String className, Method method, int indent) {
+    String methodName = method.getName();
+    String retName = method.getReturnType().getSimpleName();
+    Class<?>[] params = method.getParameterTypes();
+    echo(indent, "\n",
+         "@Override\n",
+         "public ", retName, "<", className, topMode ? "> " : "<T>> ",
+         methodName, "(");
+    if (params.length == 0) {
+      puts(0, ") {");
+      puts(indent,
+           topMode ? "" : "  closeAttrs();\n",
+           "  return ", StringUtils.toLowerCase(retName), "__" + "(this, ",
+           isInline(className, retName), ");\n", "}");
+    } else if (params.length == 1) {
+      puts(0, "String selector) {");
+      puts(indent,
+           "  return setSelector(", methodName, "(), selector);\n", "}");
+    } else {
+      throwUnhandled(className, method);
+    }
+  }
+
+  boolean isInline(String container, String className) {
+    if ((container.equals("BODY") || container.equals(hamlet) ||
+         container.equals("HEAD") || container.equals("HTML")) &&
+        (className.equals("INS") || className.equals("DEL") ||
+         className.equals("SCRIPT"))) {
+      return false;
+    }
+    return inlineElements.contains(className);
+  }
+
+  void genCurElementMethod(String className, Method method, int indent) {
+    String methodName = method.getName();
+    Class<?>[] params = method.getParameterTypes();
+    if (topMode || params.length > 0) {
+      echo(indent, "\n",
+         "@Override\n",
+         "public ", className, topMode ? " " : "<T> ", methodName, "(");
+    }
+    if (params.length == 0) {
+      if (topMode) {
+        puts(0, ") {");
+        puts(indent, "  return this;\n", "}");
+      }
+    } else if (params.length == 1) {
+      if (methodName.equals("base")) {
+        puts(0, "String href) {");
+        puts(indent,
+             "  return base().$href(href).__();\n", "}");
+      } else if (methodName.equals("script")) {
+        puts(0, "String src) {");
+        puts(indent,
+             "  return setScriptSrc(script(), src).__();\n", "}");
+      } else if (methodName.equals("style")) {
+        puts(0, "Object... lines) {");
+        puts(indent,
+             "  return style().$type(\"text/css\").__(lines).__();\n", "}");
+      } else if (methodName.equals("img")) {
+        puts(0, "String src) {");
+        puts(indent,
+             "  return ", methodName, "().$src(src).__();\n", "}");
+      } else if (methodName.equals("br") || methodName.equals("hr") ||
+                 methodName.equals("col")) {
+        puts(0, "String selector) {");
+        puts(indent,
+             "  return setSelector(", methodName, "(), selector).__();\n", "}");
+      }  else if (methodName.equals("link")) {
+        puts(0, "String href) {");
+        puts(indent,
+             "  return setLinkHref(", methodName, "(), href).__();\n", "}");
+      } else if (methodName.equals("__")) {
+        if (params[0].getSimpleName().equals("Class")) {
+          puts(0, "Class<? extends SubView> cls) {");
+          puts(indent,
+               "  ", topMode ? "subView" : "_v", "(cls);\n",
+               "  return this;\n", "}");
+        } else {
+          puts(0, "Object... lines) {");
+          puts(indent,
+               "  _p(", needsEscaping(className), ", lines);\n",
+               "  return this;\n", "}");
+        }
+      } else if (methodName.equals("_r")) {
+        puts(0, "Object... lines) {");
+        puts(indent,
+             "  _p(false, lines);\n",
+             "  return this;\n", "}");
+      } else {
+        puts(0, "String cdata) {");
+        puts(indent,
+             "  return ", methodName, "().__(cdata).__();\n", "}");
+      }
+    } else if (params.length == 2) {
+      if (methodName.equals("meta")) {
+        puts(0, "String name, String content) {");
+        puts(indent,
+             "  return meta().$name(name).$content(content).__();\n", "}");
+      } else if (methodName.equals("meta_http")) {
+        puts(0, "String header, String content) {");
+        puts(indent,
+             "  return meta().$http_equiv(header).$content(content).__();\n",
+             "}");
+      } else if (methodName.equals("a")) {
+        puts(0, "String href, String anchorText) {");
+        puts(indent,
+             "  return a().$href(href).__(anchorText).__();\n", "}");
+      } else if (methodName.equals("bdo")) {
+        puts(0, "Dir dir, String cdata) {");
+        puts(indent, "  return bdo().$dir(dir).__(cdata).__();\n", "}");
+      } else if (methodName.equals("label")) {
+        puts(0, "String forId, String cdata) {");
+        puts(indent, "  return label().$for(forId).__(cdata).__();\n", "}");
+      } else if (methodName.equals("param")) {
+        puts(0, "String name, String value) {");
+        puts(indent,
+             "  return param().$name(name).$value(value).__();\n", "}");
+      } else {
+        puts(0, "String selector, String cdata) {");
+        puts(indent,
+             "  return setSelector(", methodName,
+             "(), selector).__(cdata).__();\n", "}");
+      }
+    } else if (params.length == 3) {
+      if (methodName.equals("a")) {
+        puts(0, "String selector, String href, String anchorText) {");
+        puts(indent,
+             "  return setSelector(a(), selector)",
+             ".$href(href).__(anchorText).__();\n", "}");
+      }
+    } else {
+      throwUnhandled(className, method);
+    }
+  }
+
+  static boolean needsEscaping(String eleName) {
+    return !eleName.equals("SCRIPT") && !eleName.equals("STYLE");
+  }
+
+  static void throwUnhandled(String className, Method method) {
+    throw new WebAppException("Unhandled " + className + "#" + method);
+  }
+
+  void echo(int indent, Object... args) {
+    String prev = null;
+    for (Object o : args) {
+      String s = String.valueOf(o);
+      if (!s.isEmpty() && !s.equals("\n") &&
+          (prev == null || prev.endsWith("\n"))) {
+        indent(indent);
+      }
+      prev = s;
+      out.print(s);
+      bytes += s.length();
+    }
+  }
+
+  void indent(int indent) {
+    for (int i = 0; i < indent; ++i) {
+      out.print("  ");
+      bytes += 2;
+    }
+  }
+
+  void puts(int indent, Object... args) {
+    echo(indent, args);
+    out.println();
+    ++bytes;
+  }
+
+  boolean isElement(String s) {
+    return elementRegex.matcher(s).matches();
+  }
+
+  public static void main(String[] args) throws Exception {
+    CommandLine cmd = new GnuParser().parse(opts, args);
+    if (cmd.hasOption("help")) {
+      new HelpFormatter().printHelp("Usage: hbgen [OPTIONS]", opts);
+      return;
+    }
+    // defaults
+    Class<?> specClass = HamletSpec.class;
+    Class<?> implClass = HamletImpl.class;
+    String outputClass = "HamletTmp";
+    String outputPackage = implClass.getPackage().getName();
+    if (cmd.hasOption("spec-class")) {
+      specClass = Class.forName(cmd.getOptionValue("spec-class"));
+    }
+    if (cmd.hasOption("impl-class")) {
+      implClass = Class.forName(cmd.getOptionValue("impl-class"));
+    }
+    if (cmd.hasOption("output-class")) {
+      outputClass = cmd.getOptionValue("output-class");
+    }
+    if (cmd.hasOption("output-package")) {
+      outputPackage = cmd.getOptionValue("output-package");
+    }
+    new HamletGen().generate(specClass, implClass, outputClass, outputPackage);
+  }
+}
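
A hedged example of driving the generator programmatically, using the
defaults visible in main() above (the output class name here is arbitrary):

    import java.io.IOException;
    import org.apache.hadoop.yarn.webapp.hamlet2.HamletGen;
    import org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl;
    import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec;

    public class GenerateHamlet {
      public static void main(String[] args) throws IOException {
        // Writes Hamlet.java into the current directory, declared in the
        // hamlet2 package, exactly as generate() above does.
        new HamletGen().generate(HamletSpec.class, HamletImpl.class,
            "Hamlet", "org.apache.hadoop.yarn.webapp.hamlet2");
      }
    }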

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java
new file mode 100644
index 0000000..995e9fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java
@@ -0,0 +1,385 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet2;
+
+import com.google.common.base.Joiner;
+import static com.google.common.base.Preconditions.*;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+
+import java.io.PrintWriter;
+import java.util.EnumSet;
+import static java.util.EnumSet.*;
+import java.util.Iterator;
+
+import static org.apache.commons.lang.StringEscapeUtils.*;
+import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.*;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.WebAppException;
+
+
+/**
+ * A simple unbuffered generic hamlet implementation.
+ *
+ * Zero copy but allocation on every element, which could be
+ * optimized to use a thread-local element pool.
+ *
+ * Prints HTML as it builds. So the order is important.
+ */
+@InterfaceAudience.Private
+public class HamletImpl extends HamletSpec {
+  private static final String INDENT_CHARS = "  ";
+  private static final Splitter SS = Splitter.on('.').
+      omitEmptyStrings().trimResults();
+  private static final Joiner SJ = Joiner.on(' ');
+  private static final Joiner CJ = Joiner.on(", ");
+  static final int S_ID = 0;
+  static final int S_CLASS = 1;
+
+  int nestLevel;
+  int indents; // number of indent() called. mostly for testing.
+  private final PrintWriter out;
+  private final StringBuilder sb = new StringBuilder(); // not shared
+  private boolean wasInline = false;
+
+  /**
+   * Element options. (whether it needs end tag, is inline etc.)
+   */
+  public enum EOpt {
+    /** needs end(close) tag */
+    ENDTAG,
+    /** The content is inline */
+    INLINE,
+    /** The content is preformatted */
+    PRE
+  };
+
+  /**
+   * The base class for elements
+   * @param <T> type of the parent (containing) element for the element
+   */
+  public class EImp<T extends __> implements _Child {
+    private final String name;
+    private final T parent; // short cut for parent element
+    private final EnumSet<EOpt> opts; // element options
+
+    private boolean started = false;
+    private boolean attrsClosed = false;
+
+    EImp(String name, T parent, EnumSet<EOpt> opts) {
+      this.name = name;
+      this.parent = parent;
+      this.opts = opts;
+    }
+
+    @Override
+    public T __() {
+      closeAttrs();
+      --nestLevel;
+      printEndTag(name, opts);
+      return parent;
+    }
+
+    protected void _p(boolean quote, Object... args) {
+      closeAttrs();
+      for (Object s : args) {
+        if (!opts.contains(PRE)) {
+          indent(opts);
+        }
+        out.print(quote ? escapeHtml(String.valueOf(s))
+                        : String.valueOf(s));
+        if (!opts.contains(INLINE) && !opts.contains(PRE)) {
+          out.println();
+        }
+      }
+    }
+
+    protected void _v(Class<? extends SubView> cls) {
+      closeAttrs();
+      subView(cls);
+    }
+
+    protected void closeAttrs() {
+      if (!attrsClosed) {
+        startIfNeeded();
+        ++nestLevel;
+        out.print('>');
+        if (!opts.contains(INLINE) && !opts.contains(PRE)) {
+          out.println();
+        }
+        attrsClosed = true;
+      }
+    }
+
+    protected void addAttr(String name, String value) {
+      checkState(!attrsClosed, "attribute added after content");
+      startIfNeeded();
+      printAttr(name, value);
+    }
+
+    protected void addAttr(String name, Object value) {
+      addAttr(name, String.valueOf(value));
+    }
+
+    protected void addMediaAttr(String name, EnumSet<Media> media) {
+      // 6.13 comma-separated list
+      addAttr(name, CJ.join(media));
+    }
+
+    protected void addRelAttr(String name, EnumSet<LinkType> types) {
+      // 6.12 space-separated list
+      addAttr(name, SJ.join(types));
+    }
+
+    private void startIfNeeded() {
+      if (!started) {
+        printStartTag(name, opts);
+        started = true;
+      }
+    }
+
+    protected void _inline(boolean choice) {
+      if (choice) {
+        opts.add(INLINE);
+      } else {
+        opts.remove(INLINE);
+      }
+    }
+
+    protected void _endTag(boolean choice) {
+      if (choice) {
+        opts.add(ENDTAG);
+      } else {
+        opts.remove(ENDTAG);
+      }
+    }
+
+    protected void _pre(boolean choice) {
+      if (choice) {
+        opts.add(PRE);
+      } else {
+        opts.remove(PRE);
+      }
+    }
+  }
+
+  public class Generic<T extends __> extends EImp<T> implements PCData {
+    Generic(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    public Generic<T> _inline() {
+      super._inline(true);
+      return this;
+    }
+
+    public Generic<T> _noEndTag() {
+      super._endTag(false);
+      return this;
+    }
+
+    public Generic<T> _pre() {
+      super._pre(true);
+      return this;
+    }
+
+    public Generic<T> _attr(String name, String value) {
+      addAttr(name, value);
+      return this;
+    }
+
+    public Generic<Generic<T>> _elem(String name, EnumSet<EOpt> opts) {
+      closeAttrs();
+      return new Generic<Generic<T>>(name, this, opts);
+    }
+
+    public Generic<Generic<T>> elem(String name) {
+      return _elem(name, of(ENDTAG));
+    }
+
+    @Override
+    public Generic<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public Generic<T> _r(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+  }
+
+  public HamletImpl(PrintWriter out, int nestLevel, boolean wasInline) {
+    this.out = out;
+    this.nestLevel = nestLevel;
+    this.wasInline = wasInline;
+  }
+
+  public int nestLevel() {
+    return nestLevel;
+  }
+
+  public boolean wasInline() {
+    return wasInline;
+  }
+
+  public void setWasInline(boolean state) {
+    wasInline = state;
+  }
+
+  public PrintWriter getWriter() {
+    return out;
+  }
+
+  /**
+   * Create a root-level generic element.
+   * Mostly for testing purpose.
+   * @param <T> type of the parent element
+   * @param name of the element
+   * @param opts {@link EOpt element options}
+   * @return the element
+   */
+  public <T extends __>
+  Generic<T> root(String name, EnumSet<EOpt> opts) {
+    return new Generic<T>(name, null, opts);
+  }
+
+  public <T extends __> Generic<T> root(String name) {
+    return root(name, of(ENDTAG));
+  }
+
+  protected void printStartTag(String name, EnumSet<EOpt> opts) {
+    indent(opts);
+    sb.setLength(0);
+    out.print(sb.append('<').append(name).toString()); // for easier mock test
+  }
+
+  protected void indent(EnumSet<EOpt> opts) {
+    if (opts.contains(INLINE) && wasInline) {
+      return;
+    }
+    if (wasInline) {
+      out.println();
+    }
+    wasInline = opts.contains(INLINE) || opts.contains(PRE);
+    for (int i = 0; i < nestLevel; ++i) {
+      out.print(INDENT_CHARS);
+    }
+    ++indents;
+  }
+
+  protected void printEndTag(String name, EnumSet<EOpt> opts) {
+    if (!opts.contains(ENDTAG)) {
+      return;
+    }
+    if (!opts.contains(PRE)) {
+      indent(opts);
+    } else {
+      wasInline = opts.contains(INLINE);
+    }
+    sb.setLength(0);
+    out.print(sb.append("</").append(name).append('>').toString()); // ditto
+    if (!opts.contains(INLINE)) {
+      out.println();
+    }
+  }
+
+  protected void printAttr(String name, String value) {
+    sb.setLength(0);
+    sb.append(' ').append(name);
+    if (value != null) {
+      sb.append("=\"").append(escapeHtml(value)).append("\"");
+    }
+    out.print(sb.toString());
+  }
+
+  /**
+   * Sub-classes should override this to do something interesting.
+   * @param cls the sub-view class
+   */
+  protected void subView(Class<? extends SubView> cls) {
+    indent(of(ENDTAG)); // not an inline view
+    sb.setLength(0);
+    out.print(sb.append('[').append(cls.getName()).append(']').toString());
+    out.println();
+  }
+
+  /**
+   * Parse selector into id and classes
+   * @param selector in the form of (#id)?(.class)*
+   * @return an two element array [id, "space-separated classes"].
+   *         Either element could be null.
+   * @throws WebAppException when both are null or syntax error.
+   */
+  public static String[] parseSelector(String selector) {
+    String[] result = new String[]{null, null};
+    Iterable<String> rs = SS.split(selector);
+    Iterator<String> it = rs.iterator();
+    if (it.hasNext()) {
+      String maybeId = it.next();
+      if (maybeId.charAt(0) == '#') {
+        result[S_ID] = maybeId.substring(1);
+        if (it.hasNext()) {
+          result[S_CLASS] = SJ.join(Iterables.skip(rs, 1));
+        }
+      } else {
+        result[S_CLASS] = SJ.join(rs);
+      }
+      return result;
+    }
+    throw new WebAppException("Error parsing selector: "+ selector);
+  }
+
+  /**
+   * Set id and/or class attributes for an element.
+   * @param <E> type of the element
+   * @param e the element
+   * @param selector Haml form of "(#id)?(.class)*"
+   * @return the element
+   */
+  public static <E extends CoreAttrs> E setSelector(E e, String selector) {
+    String[] res = parseSelector(selector);
+    if (res[S_ID] != null) {
+      e.$id(res[S_ID]);
+    }
+    if (res[S_CLASS] != null) {
+      e.$class(res[S_CLASS]);
+    }
+    return e;
+  }
+
+  public static <E extends LINK> E setLinkHref(E e, String href) {
+    if (href.endsWith(".css")) {
+      e.$rel("stylesheet"); // required in html5
+    }
+    e.$href(href);
+    return e;
+  }
+
+  public static <E extends SCRIPT> E setScriptSrc(E e, String src) {
+    if (src.endsWith(".js")) {
+      e.$type("text/javascript"); // required in html4
+    }
+    e.$src(src);
+    return e;
+  }
+}
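
A hedged sketch of exercising HamletImpl directly through its root()/elem()
test hooks (output goes to stdout; the element and attribute names are
arbitrary):

    import java.io.PrintWriter;
    import org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl;

    public class HamletImplExample {
      public static void main(String[] args) {
        PrintWriter out = new PrintWriter(System.out, true);
        HamletImpl hi = new HamletImpl(out, 0, false);
        hi.root("html")
            .elem("body")              // closes <html>'s attrs, opens <body>
            ._attr("class", "content") // attributes must precede content
            .__("hello & goodbye")     // __(...) HTML-escapes its arguments
            .__()                      // emits </body>
            .__();                     // emits </html>
        out.flush();
      }
    }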




[34/50] [abbrv] hadoop git commit: HDFS-12206. Rename the split EC / replicated block metrics.

Posted by xg...@apache.org.
HDFS-12206. Rename the split EC / replicated block metrics.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/480c8db4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/480c8db4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/480c8db4

Branch: refs/heads/YARN-5734
Commit: 480c8db40c09cd0e25b4d145bc871b70a45d4f50
Parents: 77791e4
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Jul 28 11:24:44 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri Jul 28 11:24:44 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../server/blockmanagement/BlockManager.java    | 22 ++---
 .../hdfs/server/namenode/FSNamesystem.java      | 92 ++++++++++----------
 .../namenode/metrics/ECBlockGroupsMBean.java    | 59 +++++++++++++
 .../metrics/ECBlockGroupsStatsMBean.java        | 59 -------------
 .../namenode/metrics/ReplicatedBlocksMBean.java | 63 ++++++++++++++
 .../metrics/ReplicatedBlocksStatsMBean.java     | 63 --------------
 .../server/namenode/TestAddStripedBlocks.java   |  4 +-
 .../server/namenode/TestNameNodeMXBean.java     |  8 +-
 .../namenode/TestReconstructStripedBlocks.java  |  4 +-
 .../namenode/metrics/TestNameNodeMetrics.java   | 22 ++---
 11 files changed, 199 insertions(+), 199 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1f60f32..d9568f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1031,7 +1031,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_PIPELINE_ECN_ENABLED_DEFAULT = false;
 
   // Key Provider Cache Expiry
-  public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED = 
+  public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED =
     "dfs.datanode.block-pinning.enabled";
   public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT =
     false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a5ee30b..fc754a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -232,47 +232,47 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /** Used by metrics. */
-  public long getLowRedundancyBlocksStat() {
+  public long getLowRedundancyBlocks() {
     return neededReconstruction.getLowRedundancyBlocksStat();
   }
 
   /** Used by metrics. */
-  public long getCorruptBlocksStat() {
+  public long getCorruptBlocks() {
     return corruptReplicas.getCorruptBlocksStat();
   }
 
   /** Used by metrics. */
-  public long getMissingBlocksStat() {
+  public long getMissingBlocks() {
     return neededReconstruction.getCorruptBlocksStat();
   }
 
   /** Used by metrics. */
-  public long getMissingReplicationOneBlocksStat() {
+  public long getMissingReplicationOneBlocks() {
     return neededReconstruction.getCorruptReplicationOneBlocksStat();
   }
 
   /** Used by metrics. */
-  public long getPendingDeletionBlocksStat() {
+  public long getPendingDeletionReplicatedBlocks() {
     return invalidateBlocks.getBlocksStat();
   }
 
   /** Used by metrics. */
-  public long getLowRedundancyECBlockGroupsStat() {
+  public long getLowRedundancyECBlockGroups() {
     return neededReconstruction.getLowRedundancyECBlockGroupsStat();
   }
 
   /** Used by metrics. */
-  public long getCorruptECBlockGroupsStat() {
+  public long getCorruptECBlockGroups() {
     return corruptReplicas.getCorruptECBlockGroupsStat();
   }
 
   /** Used by metrics. */
-  public long getMissingECBlockGroupsStat() {
+  public long getMissingECBlockGroups() {
     return neededReconstruction.getCorruptECBlockGroupsStat();
   }
 
   /** Used by metrics. */
-  public long getPendingDeletionECBlockGroupsStat() {
+  public long getPendingDeletionECBlockGroups() {
     return invalidateBlocks.getECBlockGroupsStat();
   }
 
@@ -2292,11 +2292,11 @@ public class BlockManager implements BlockStatsMXBean {
     return bmSafeMode.getBytesInFuture();
   }
 
-  public long getBytesInFutureReplicatedBlocksStat() {
+  public long getBytesInFutureReplicatedBlocks() {
     return bmSafeMode.getBytesInFutureBlocks();
   }
 
-  public long getBytesInFutureStripedBlocksStat() {
+  public long getBytesInFutureECBlockGroups() {
     return bmSafeMode.getBytesInFutureECBlockGroups();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9872cd7..fd4ab8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -92,6 +92,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksMBean;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
@@ -243,10 +244,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
-import org.apache.hadoop.hdfs.server.namenode.metrics.ECBlockGroupsStatsMBean;
+import org.apache.hadoop.hdfs.server.namenode.metrics.ECBlockGroupsMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksStatsMBean;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -340,7 +340,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 @InterfaceAudience.Private
 @Metrics(context="dfs")
 public class FSNamesystem implements Namesystem, FSNamesystemMBean,
-    NameNodeMXBean, ReplicatedBlocksStatsMBean, ECBlockGroupsStatsMBean {
+    NameNodeMXBean, ReplicatedBlocksMBean, ECBlockGroupsMBean {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
   private final MetricsRegistry registry = new MetricsRegistry("FSNamesystem");
   @Metric final MutableRatesWithAggregation detailedLockHoldTimeMetrics =
@@ -4076,10 +4076,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @see ClientProtocol#getBlocksStats()
    */
   BlocksStats getBlocksStats() {
-    return new BlocksStats(getLowRedundancyBlocksStat(),
-        getCorruptBlocksStat(), getMissingBlocksStat(),
-        getMissingReplicationOneBlocksStat(), getBlocksBytesInFutureStat(),
-        getPendingDeletionBlocksStat());
+    return new BlocksStats(getLowRedundancyReplicatedBlocks(),
+        getCorruptReplicatedBlocks(), getMissingReplicatedBlocks(),
+        getMissingReplicationOneBlocks(), getBytesInFutureReplicatedBlocks(),
+        getPendingDeletionReplicatedBlocks());
   }
 
   /**
@@ -4089,9 +4089,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @see ClientProtocol#getECBlockGroupsStats()
    */
   ECBlockGroupsStats getECBlockGroupsStats() {
-    return new ECBlockGroupsStats(getLowRedundancyECBlockGroupsStat(),
-        getCorruptECBlockGroupsStat(), getMissingECBlockGroupsStat(),
-        getECBlocksBytesInFutureStat(), getPendingDeletionECBlockGroupsStat());
+    return new ECBlockGroupsStats(getLowRedundancyECBlockGroups(),
+        getCorruptECBlockGroups(), getMissingECBlockGroups(),
+        getBytesInFutureECBlockGroups(), getPendingDeletionECBlockGroups());
   }
 
   @Override // FSNamesystemMBean
@@ -4638,76 +4638,76 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Override // ReplicatedBlocksMBean
   @Metric({"LowRedundancyReplicatedBlocks",
       "Number of low redundancy replicated blocks"})
-  public long getLowRedundancyBlocksStat() {
-    return blockManager.getLowRedundancyBlocksStat();
+  public long getLowRedundancyReplicatedBlocks() {
+    return blockManager.getLowRedundancyBlocks();
   }
 
   @Override // ReplicatedBlocksMBean
   @Metric({"CorruptReplicatedBlocks", "Number of corrupted replicated blocks"})
-  public long getCorruptBlocksStat() {
-    return blockManager.getCorruptBlocksStat();
+  public long getCorruptReplicatedBlocks() {
+    return blockManager.getCorruptBlocks();
   }
 
   @Override // ReplicatedBlocksMBean
   @Metric({"MissingReplicatedBlocks", "Number of missing replicated blocks"})
-  public long getMissingBlocksStat() {
-    return blockManager.getMissingBlocksStat();
+  public long getMissingReplicatedBlocks() {
+    return blockManager.getMissingBlocks();
   }
 
   @Override // ReplicatedBlocksMBean
-  @Metric({"MissingReplicatedOneBlocks", "Number of missing replicated blocks" +
-      " with replication factor 1"})
-  public long getMissingReplicationOneBlocksStat() {
-    return blockManager.getMissingReplicationOneBlocksStat();
+  @Metric({"MissingReplicationOneBlocks", "Number of missing replicated " +
+      "blocks with replication factor 1"})
+  public long getMissingReplicationOneBlocks() {
+    return blockManager.getMissingReplicationOneBlocks();
   }
 
   @Override // ReplicatedBlocksMBean
-  @Metric({"BytesReplicatedFutureBlocks", "Total bytes in replicated blocks " +
-      "with future generation stamp"})
-  public long getBlocksBytesInFutureStat() {
-    return blockManager.getBytesInFutureReplicatedBlocksStat();
+  @Metric({"BytesInFutureReplicatedBlocks", "Total bytes in replicated " +
+      "blocks with future generation stamp"})
+  public long getBytesInFutureReplicatedBlocks() {
+    return blockManager.getBytesInFutureReplicatedBlocks();
   }
 
   @Override // ReplicatedBlocksMBean
   @Metric({"PendingDeletionReplicatedBlocks", "Number of replicated blocks " +
       "that are pending deletion"})
-  public long getPendingDeletionBlocksStat() {
-    return blockManager.getPendingDeletionBlocksStat();
+  public long getPendingDeletionReplicatedBlocks() {
+    return blockManager.getPendingDeletionReplicatedBlocks();
   }
 
-  @Override // ECBlockGroupsStatsMBean
+  @Override // ECBlockGroupsMBean
   @Metric({"LowRedundancyECBlockGroups", "Number of erasure coded block " +
       "groups with low redundancy"})
-  public long getLowRedundancyECBlockGroupsStat() {
-    return blockManager.getLowRedundancyECBlockGroupsStat();
+  public long getLowRedundancyECBlockGroups() {
+    return blockManager.getLowRedundancyECBlockGroups();
   }
 
-  @Override // ECBlockGroupsStatsMBean
+  @Override // ECBlockGroupsMBean
   @Metric({"CorruptECBlockGroups", "Number of erasure coded block groups that" +
       " are corrupt"})
-  public long getCorruptECBlockGroupsStat() {
-    return blockManager.getCorruptECBlockGroupsStat();
+  public long getCorruptECBlockGroups() {
+    return blockManager.getCorruptECBlockGroups();
   }
 
-  @Override // ECBlockGroupsStatsMBean
+  @Override // ECBlockGroupsMBean
   @Metric({"MissingECBlockGroups", "Number of erasure coded block groups that" +
       " are missing"})
-  public long getMissingECBlockGroupsStat() {
-    return blockManager.getMissingECBlockGroupsStat();
+  public long getMissingECBlockGroups() {
+    return blockManager.getMissingECBlockGroups();
   }
 
-  @Override // ECBlockGroupsStatsMBean
-  @Metric({"BytesFutureECBlockGroups", "Total bytes in erasure coded block " +
+  @Override // ECBlockGroupsMBean
+  @Metric({"BytesInFutureECBlockGroups", "Total bytes in erasure coded block " +
       "groups with future generation stamp"})
-  public long getECBlocksBytesInFutureStat() {
-    return blockManager.getBytesInFutureStripedBlocksStat();
+  public long getBytesInFutureECBlockGroups() {
+    return blockManager.getBytesInFutureECBlockGroups();
   }
 
-  @Override // ECBlockGroupsStatsMBean
+  @Override // ECBlockGroupsMBean
   @Metric({"PendingDeletionECBlockGroups", "Number of erasure coded block " +
       "groups that are pending deletion"})
-  public long getPendingDeletionECBlockGroupsStat() {
-    return blockManager.getPendingDeletionECBlockGroupsStat();
+  public long getPendingDeletionECBlockGroups() {
+    return blockManager.getPendingDeletionECBlockGroups();
   }
 
   @Override
@@ -4774,9 +4774,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * Register following MBeans with their respective names.
    * FSNamesystemMBean:
    *        "hadoop:service=NameNode,name=FSNamesystemState"
-   * ReplicatedBlocksStatsMBean:
+   * ReplicatedBlocksMBean:
    *        "hadoop:service=NameNode,name=ReplicatedBlocksState"
-   * ECBlockGroupsStatsMBean:
+   * ECBlockGroupsMBean:
    *        "hadoop:service=NameNode,name=ECBlockGroupsState"
    */
   private void registerMBean() {
@@ -4785,9 +4785,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       StandardMBean namesystemBean = new StandardMBean(
           this, FSNamesystemMBean.class);
       StandardMBean replicaBean = new StandardMBean(
-          this, ReplicatedBlocksStatsMBean.class);
+          this, ReplicatedBlocksMBean.class);
       StandardMBean ecBean = new StandardMBean(
-          this, ECBlockGroupsStatsMBean.class);
+          this, ECBlockGroupsMBean.class);
       namesystemMBeanName = MBeans.register(
           "NameNode", "FSNamesystemState", namesystemBean);
       replicatedBlocksMBeanName = MBeans.register(
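
For operators tracking the rename, a hedged sketch of reading one of the
renamed metrics over JMX in-process. The object name follows the
registerMBean() javadoc above; JMX exposes each getter with its get prefix
dropped, and the 'Hadoop' domain capitalization is an assumption here:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class EcMetricsProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Remote reads would go through a JMXConnector instead.
        ObjectName ec =
            new ObjectName("Hadoop:service=NameNode,name=ECBlockGroupsState");
        Long missing = (Long) mbs.getAttribute(ec, "MissingECBlockGroups");
        System.out.println("MissingECBlockGroups = " + missing);
      }
    }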

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java
new file mode 100644
index 0000000..5fa646a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * This interface defines the methods to get status pertaining to blocks of type
+ * {@link org.apache.hadoop.hdfs.protocol.BlockType#STRIPED} in FSNamesystem
+ * of a NameNode. It is also used for publishing via JMX.
+ * <p>
+ * Aggregated status of all blocks is reported in
+ * @see FSNamesystemMBean
+ * Name Node runtime activity statistic info is reported in
+ * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
+ *
+ */
+@InterfaceAudience.Private
+public interface ECBlockGroupsMBean {
+  /**
+   * Return count of erasure coded block groups with low redundancy.
+   */
+  long getLowRedundancyECBlockGroups();
+
+  /**
+   * Return count of erasure coded block groups that are corrupt.
+   */
+  long getCorruptECBlockGroups();
+
+  /**
+   * Return count of erasure coded block groups that are missing.
+   */
+  long getMissingECBlockGroups();
+
+  /**
+   * Return total bytes of erasure coded future block groups.
+   */
+  long getBytesInFutureECBlockGroups();
+
+  /**
+   * Return count of erasure coded block groups that are pending deletion.
+   */
+  long getPendingDeletionECBlockGroups();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
deleted file mode 100644
index f9fd416..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * This interface defines the methods to get status pertaining to blocks of type
- * {@link org.apache.hadoop.hdfs.protocol.BlockType#STRIPED} in FSNamesystem
- * of a NameNode. It is also used for publishing via JMX.
- * <p>
- * Aggregated status of all blocks is reported in
- * @see FSNamesystemMBean
- * Name Node runtime activity statistic info is reported in
- * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
- *
- */
-@InterfaceAudience.Private
-public interface ECBlockGroupsStatsMBean {
-  /**
-   * Return count of erasure coded block groups with low redundancy.
-   */
-  long getLowRedundancyECBlockGroupsStat();
-
-  /**
-   * Return count of erasure coded block groups that are corrupt.
-   */
-  long getCorruptECBlockGroupsStat();
-
-  /**
-   * Return count of erasure coded block groups that are missing.
-   */
-  long getMissingECBlockGroupsStat();
-
-  /**
-   * Return total bytes of erasure coded future block groups.
-   */
-  long getECBlocksBytesInFutureStat();
-
-  /**
-   * Return count of erasure coded block groups that are pending deletion.
-   */
-  long getPendingDeletionECBlockGroupsStat();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java
new file mode 100644
index 0000000..e2c924e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * This interface defines the methods to get status pertaining to blocks of type
+ * {@link org.apache.hadoop.hdfs.protocol.BlockType#CONTIGUOUS} in FSNamesystem
+ * of a NameNode. It is also used for publishing via JMX.
+ * <p>
+ * Aggregated status of all blocks is reported in
+ * @see FSNamesystemMBean
+ * Name Node runtime activity statistic info is reported in
+ * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
+ */
+@InterfaceAudience.Private
+public interface ReplicatedBlocksMBean {
+  /**
+   * Return low redundancy blocks count.
+   */
+  long getLowRedundancyReplicatedBlocks();
+
+  /**
+   * Return corrupt blocks count.
+   */
+  long getCorruptReplicatedBlocks();
+
+  /**
+   * Return missing blocks count.
+   */
+  long getMissingReplicatedBlocks();
+
+  /**
+   * Return count of missing blocks with replication factor one.
+   */
+  long getMissingReplicationOneBlocks();
+
+  /**
+   * Return total bytes of future blocks.
+   */
+  long getBytesInFutureReplicatedBlocks();
+
+  /**
+   * Return count of blocks that are pending deletion.
+   */
+  long getPendingDeletionReplicatedBlocks();
+}
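
The renamed getters surface through JMX with the same names minus the "get"
prefix, so any dashboard keyed on the old "*Stat" attributes needs updating. A
minimal read-side sketch follows; the two ObjectName strings are assumptions
based on the NameNode's usual "Hadoop:service=NameNode,name=<Component>"
registration pattern (the tests further below use equivalent names but do not
show their construction), and only the attribute strings themselves come from
these patches.

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class BlockStateJmxProbe {
      public static void main(String[] args) throws Exception {
        // In-process sketch; a remote monitor would connect through a
        // JMXConnector instead of the platform MBean server.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName repl = new ObjectName(
            "Hadoop:service=NameNode,name=ReplicatedBlocksState");
        ObjectName ec = new ObjectName(
            "Hadoop:service=NameNode,name=ECBlockGroupsState");
        // Attribute names match the new getters with "get" stripped.
        long missing = (Long) mbs.getAttribute(repl, "MissingReplicatedBlocks")
            + (Long) mbs.getAttribute(ec, "MissingECBlockGroups");
        System.out.println("Total missing blocks/groups: " + missing);
      }
    }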

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
deleted file mode 100644
index 4643b80..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.metrics;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * This interface defines the methods to get status pertaining to blocks of type
- * {@link org.apache.hadoop.hdfs.protocol.BlockType#CONTIGUOUS} in FSNamesystem
- * of a NameNode. It is also used for publishing via JMX.
- * <p>
- * Aggregated status of all blocks is reported in
- * @see FSNamesystemMBean
- * Name Node runtime activity statistic info is reported in
- * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
- */
-@InterfaceAudience.Private
-public interface ReplicatedBlocksStatsMBean {
-  /**
-   * Return low redundancy blocks count.
-   */
-  long getLowRedundancyBlocksStat();
-
-  /**
-   * Return corrupt blocks count.
-   */
-  long getCorruptBlocksStat();
-
-  /**
-   * Return missing blocks count.
-   */
-  long getMissingBlocksStat();
-
-  /**
-   * Return count of missing blocks with replication factor one.
-   */
-  long getMissingReplicationOneBlocksStat();
-
-  /**
-   * Return total bytes of future blocks.
-   */
-  long getBlocksBytesInFutureStat();
-
-  /**
-   * Return count of blocks that are pending deletion.
-   */
-  long getPendingDeletionBlocksStat();
-}
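
Read side by side with the ReplicatedBlocksMBean added above, this deletion is
a pure rename of the contiguous-block getters; the mapping, exactly as it
appears in the two interface diffs, is:

    getLowRedundancyBlocksStat          -> getLowRedundancyReplicatedBlocks
    getCorruptBlocksStat                -> getCorruptReplicatedBlocks
    getMissingBlocksStat                -> getMissingReplicatedBlocks
    getMissingReplicationOneBlocksStat  -> getMissingReplicationOneBlocks
    getBlocksBytesInFutureStat          -> getBytesInFutureReplicatedBlocks
    getPendingDeletionBlocksStat        -> getPendingDeletionReplicatedBlocks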

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index c556699..623c444 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -424,9 +424,9 @@ public class TestAddStripedBlocks {
         cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
     BlockManagerTestUtil.updateState(ns.getBlockManager());
     // the total number of corrupted block info is still 1
-    Assert.assertEquals(1, ns.getCorruptECBlockGroupsStat());
+    Assert.assertEquals(1, ns.getCorruptECBlockGroups());
     Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
-    Assert.assertEquals(0, ns.getCorruptBlocksStat());
+    Assert.assertEquals(0, ns.getCorruptReplicatedBlocks());
     // 2 internal blocks corrupted
     Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 32c2a49..63f9113 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -809,10 +809,10 @@ public class TestNameNodeMXBean {
       long totalMissingBlocks = cluster.getNamesystem().getMissingBlocksCount();
       Long replicaMissingBlocks =
           (Long) mbs.getAttribute(replStateMBeanName,
-              "MissingBlocksStat");
+              "MissingReplicatedBlocks");
       Long ecMissingBlocks =
           (Long) mbs.getAttribute(ecBlkGrpStateMBeanName,
-              "MissingECBlockGroupsStat");
+              "MissingECBlockGroups");
       assertEquals("Unexpected total missing blocks!",
           expectedMissingBlockCount, totalMissingBlocks);
       assertEquals("Unexpected total missing blocks!",
@@ -826,10 +826,10 @@ public class TestNameNodeMXBean {
           cluster.getNamesystem().getCorruptReplicaBlocks();
       Long replicaCorruptBlocks =
           (Long) mbs.getAttribute(replStateMBeanName,
-              "CorruptBlocksStat");
+              "CorruptReplicatedBlocks");
       Long ecCorruptBlocks =
           (Long) mbs.getAttribute(ecBlkGrpStateMBeanName,
-              "CorruptECBlockGroupsStat");
+              "CorruptECBlockGroups");
       assertEquals("Unexpected total corrupt blocks!",
           expectedCorruptBlockCount, totalCorruptBlocks);
       assertEquals("Unexpected total corrupt blocks!",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index 540ae63..02075f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -419,7 +419,7 @@ public class TestReconstructStripedBlocks {
 
       // Verify low redundancy count matching EC block groups count
       BlockManagerTestUtil.updateState(bm);
-      assertEquals(blockGroups, bm.getLowRedundancyECBlockGroupsStat());
+      assertEquals(blockGroups, bm.getLowRedundancyECBlockGroups());
       DFSTestUtil.verifyClientStats(conf, dfsCluster);
 
 
@@ -429,7 +429,7 @@ public class TestReconstructStripedBlocks {
 
       // Verify pending reconstruction count
       assertEquals(blockGroups, getNumberOfBlocksToBeErasureCoded(dfsCluster));
-      assertEquals(0, bm.getLowRedundancyECBlockGroupsStat());
+      assertEquals(0, bm.getLowRedundancyECBlockGroups());
       DFSTestUtil.verifyClientStats(conf, dfsCluster);
     } finally {
       dfsCluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/480c8db4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index f598d8c..c3bb255 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -317,27 +317,27 @@ public class TestNameNodeMetrics {
         namesystem.getUnderReplicatedBlocks());
     assertEquals("Low redundancy metrics not matching!",
         namesystem.getLowRedundancyBlocks(),
-        namesystem.getLowRedundancyBlocksStat() +
-            namesystem.getLowRedundancyECBlockGroupsStat());
+        namesystem.getLowRedundancyReplicatedBlocks() +
+            namesystem.getLowRedundancyECBlockGroups());
     assertEquals("Corrupt blocks metrics not matching!",
         namesystem.getCorruptReplicaBlocks(),
-        namesystem.getCorruptBlocksStat() +
-            namesystem.getCorruptECBlockGroupsStat());
+        namesystem.getCorruptReplicatedBlocks() +
+            namesystem.getCorruptECBlockGroups());
     assertEquals("Missing blocks metrics not matching!",
         namesystem.getMissingBlocksCount(),
-        namesystem.getMissingBlocksStat() +
-            namesystem.getMissingECBlockGroupsStat());
+        namesystem.getMissingReplicatedBlocks() +
+            namesystem.getMissingECBlockGroups());
     assertEquals("Missing blocks with replication factor one not matching!",
         namesystem.getMissingReplOneBlocksCount(),
-        namesystem.getMissingReplicationOneBlocksStat());
+        namesystem.getMissingReplicationOneBlocks());
     assertEquals("Bytes in future blocks metrics not matching!",
         namesystem.getBytesInFuture(),
-        namesystem.getBlocksBytesInFutureStat() +
-            namesystem.getECBlocksBytesInFutureStat());
+        namesystem.getBytesInFutureReplicatedBlocks() +
+            namesystem.getBytesInFutureECBlockGroups());
     assertEquals("Pending deletion blocks metrics not matching!",
         namesystem.getPendingDeletionBlocks(),
-        namesystem.getPendingDeletionBlocksStat() +
-            namesystem.getPendingDeletionECBlockGroupsStat());
+        namesystem.getPendingDeletionReplicatedBlocks() +
+            namesystem.getPendingDeletionECBlockGroups());
   }
 
   /** Corrupt a block and ensure metrics reflects it */




[38/50] [abbrv] hadoop git commit: YARN-6897. Refactoring RMWebServices by moving some util methods to RMWebAppUtil. (Giovanni Matteo Fumarola via Subru).

Posted by xg...@apache.org.
YARN-6897. Refactoring RMWebServices by moving some util methods to RMWebAppUtil. (Giovanni Matteo Fumarola via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcde66be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcde66be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcde66be

Branch: refs/heads/YARN-5734
Commit: bcde66bed1e41b5644811fe90bfbf3d56827db36
Parents: 713349a
Author: Subru Krishnan <su...@apache.org>
Authored: Fri Jul 28 15:44:36 2017 -0700
Committer: Subru Krishnan <su...@apache.org>
Committed: Fri Jul 28 15:44:36 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/webapp/RMWebAppUtil.java    | 195 +++++++++++++++++++
 .../resourcemanager/webapp/RMWebServices.java   | 179 +----------------
 2 files changed, 201 insertions(+), 173 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcde66be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java
index 263828b..57805ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java
@@ -18,21 +18,45 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LogAggregationContext;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ReservationId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CredentialsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LocalResourceInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LogAggregationContextInfo;
 import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilter;
 import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 /**
  * Util class for ResourceManager WebApp.
@@ -146,4 +170,175 @@ public final class RMWebAppUtil {
       }
     }
   }
+
+  /**
+   * Create the actual ApplicationSubmissionContext to be submitted to the RM
+   * from the information provided by the user.
+   *
+   * @param newApp the information provided by the user
+   * @param conf RM configuration
+   * @return returns the constructed ApplicationSubmissionContext
+   * @throws IOException in case of Error
+   */
+  public static ApplicationSubmissionContext createAppSubmissionContext(
+      ApplicationSubmissionContextInfo newApp, Configuration conf)
+      throws IOException {
+
+    // create local resources and app submission context
+
+    ApplicationId appid;
+    String error =
+        "Could not parse application id " + newApp.getApplicationId();
+    try {
+      appid = ApplicationId.fromString(newApp.getApplicationId());
+    } catch (Exception e) {
+      throw new BadRequestException(error);
+    }
+    ApplicationSubmissionContext appContext = ApplicationSubmissionContext
+        .newInstance(appid, newApp.getApplicationName(), newApp.getQueue(),
+            Priority.newInstance(newApp.getPriority()),
+            createContainerLaunchContext(newApp), newApp.getUnmanagedAM(),
+            newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(),
+            createAppSubmissionContextResource(newApp, conf),
+            newApp.getApplicationType(),
+            newApp.getKeepContainersAcrossApplicationAttempts(),
+            newApp.getAppNodeLabelExpression(),
+            newApp.getAMContainerNodeLabelExpression());
+    appContext.setApplicationTags(newApp.getApplicationTags());
+    appContext.setAttemptFailuresValidityInterval(
+        newApp.getAttemptFailuresValidityInterval());
+    if (newApp.getLogAggregationContextInfo() != null) {
+      appContext.setLogAggregationContext(
+          createLogAggregationContext(newApp.getLogAggregationContextInfo()));
+    }
+    String reservationIdStr = newApp.getReservationId();
+    if (reservationIdStr != null && !reservationIdStr.isEmpty()) {
+      ReservationId reservationId =
+          ReservationId.parseReservationId(reservationIdStr);
+      appContext.setReservationID(reservationId);
+    }
+    return appContext;
+  }
+
+  /**
+   * Create the actual Resource inside the ApplicationSubmissionContextInfo to
+   * be submitted to the RM from the information provided by the user.
+   *
+   * @param newApp the information provided by the user
+   * @param conf RM configuration
+   * @return returns the constructed Resource inside the
+   *         ApplicationSubmissionContextInfo
+   * @throws BadRequestException
+   */
+  private static Resource createAppSubmissionContextResource(
+      ApplicationSubmissionContextInfo newApp, Configuration conf)
+      throws BadRequestException {
+    if (newApp.getResource().getvCores() > conf.getInt(
+        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)) {
+      String msg = "Requested more cores than configured max";
+      throw new BadRequestException(msg);
+    }
+    if (newApp.getResource().getMemorySize() > conf.getInt(
+        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) {
+      String msg = "Requested more memory than configured max";
+      throw new BadRequestException(msg);
+    }
+    Resource r = Resource.newInstance(newApp.getResource().getMemorySize(),
+        newApp.getResource().getvCores());
+    return r;
+  }
+
+  /**
+   * Create the ContainerLaunchContext required for the
+   * ApplicationSubmissionContext. This function takes the user information and
+   * generates the ByteBuffer structures required by the ContainerLaunchContext
+   *
+   * @param newApp the information provided by the user
+   * @return created context
+   * @throws BadRequestException
+   * @throws IOException
+   */
+  private static ContainerLaunchContext createContainerLaunchContext(
+      ApplicationSubmissionContextInfo newApp)
+      throws BadRequestException, IOException {
+
+    // create container launch context
+
+    HashMap<String, ByteBuffer> hmap = new HashMap<String, ByteBuffer>();
+    for (Map.Entry<String, String> entry : newApp
+        .getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) {
+      if (!entry.getValue().isEmpty()) {
+        Base64 decoder = new Base64(0, null, true);
+        byte[] data = decoder.decode(entry.getValue());
+        hmap.put(entry.getKey(), ByteBuffer.wrap(data));
+      }
+    }
+
+    HashMap<String, LocalResource> hlr = new HashMap<String, LocalResource>();
+    for (Map.Entry<String, LocalResourceInfo> entry : newApp
+        .getContainerLaunchContextInfo().getResources().entrySet()) {
+      LocalResourceInfo l = entry.getValue();
+      LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()),
+          l.getType(), l.getVisibility(), l.getSize(), l.getTimestamp());
+      hlr.put(entry.getKey(), lr);
+    }
+
+    DataOutputBuffer out = new DataOutputBuffer();
+    Credentials cs = createCredentials(
+        newApp.getContainerLaunchContextInfo().getCredentials());
+    cs.writeTokenStorageToStream(out);
+    ByteBuffer tokens = ByteBuffer.wrap(out.getData());
+
+    ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr,
+        newApp.getContainerLaunchContextInfo().getEnvironment(),
+        newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens,
+        newApp.getContainerLaunchContextInfo().getAcls());
+
+    return ctx;
+  }
+
+  /**
+   * Generate a Credentials object from the information in the CredentialsInfo
+   * object.
+   *
+   * @param credentials the CredentialsInfo provided by the user.
+   * @return
+   */
+  private static Credentials createCredentials(CredentialsInfo credentials) {
+    Credentials ret = new Credentials();
+    try {
+      for (Map.Entry<String, String> entry : credentials.getTokens()
+          .entrySet()) {
+        Text alias = new Text(entry.getKey());
+        Token<TokenIdentifier> token = new Token<TokenIdentifier>();
+        token.decodeFromUrlString(entry.getValue());
+        ret.addToken(alias, token);
+      }
+      for (Map.Entry<String, String> entry : credentials.getSecrets()
+          .entrySet()) {
+        Text alias = new Text(entry.getKey());
+        Base64 decoder = new Base64(0, null, true);
+        byte[] secret = decoder.decode(entry.getValue());
+        ret.addSecretKey(alias, secret);
+      }
+    } catch (IOException ie) {
+      throw new BadRequestException(
+          "Could not parse credentials data; exception message = "
+              + ie.getMessage());
+    }
+    return ret;
+  }
+
+  private static LogAggregationContext createLogAggregationContext(
+      LogAggregationContextInfo logAggregationContextInfo) {
+    return LogAggregationContext.newInstance(
+        logAggregationContextInfo.getIncludePattern(),
+        logAggregationContextInfo.getExcludePattern(),
+        logAggregationContextInfo.getRolledLogsIncludePattern(),
+        logAggregationContextInfo.getRolledLogsExcludePattern(),
+        logAggregationContextInfo.getLogAggregationPolicyClassName(),
+        logAggregationContextInfo.getLogAggregationPolicyParameters());
+  }
 }
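
With the helper now static on RMWebAppUtil and taking the Configuration
explicitly (instead of reading rm.getConfig() as the old RMWebServices copy
did), other callers can reuse it directly. A hedged sketch of such a caller;
the class and method here are hypothetical, and only
RMWebAppUtil.createAppSubmissionContext(newApp, conf) is from this patch:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;

    public final class AppSubmissionSketch {
      private AppSubmissionSketch() {
      }

      // Any service that has already unmarshalled a REST payload into an
      // ApplicationSubmissionContextInfo can build the real submission
      // context without going through RMWebServices.
      public static ApplicationSubmissionContext toContext(
          ApplicationSubmissionContextInfo newApp, Configuration conf)
          throws IOException {
        // Throws BadRequestException for an unparseable application id or
        // for resource asks above the scheduler maximums found in conf.
        return RMWebAppUtil.createAppSubmissionContext(newApp, conf);
      }
    }

Note that, per createCredentials above, the payload's token values are
expected to be Token.encodeToUrlString() output and its secrets URL-safe
base64, since the helper decodes them with decodeFromUrlString() and a
url-safe commons-codec Base64 instance.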

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcde66be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 7c053bf..c537b7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
-import java.nio.ByteBuffer;
 import java.security.AccessControlException;
 import java.security.Principal;
 import java.security.PrivilegedExceptionAction;
@@ -57,22 +56,18 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 
-import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.JettyUtils;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
@@ -104,10 +99,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeState;
@@ -119,7 +111,6 @@ import org.apache.hadoop.yarn.api.records.ReservationRequest;
 import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
 import org.apache.hadoop.yarn.api.records.ReservationRequests;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -136,33 +127,34 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CredentialsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LocalResourceInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LogAggregationContextInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewReservation;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
@@ -185,7 +177,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.*;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
@@ -1589,7 +1580,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     }
 
     ApplicationSubmissionContext appContext =
-        createAppSubmissionContext(newApp);
+        RMWebAppUtil.createAppSubmissionContext(newApp, conf);
 
     final SubmitApplicationRequest req =
         SubmitApplicationRequest.newInstance(appContext);
@@ -1640,153 +1631,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     return appId;
   }
 
-  /**
-   * Create the actual ApplicationSubmissionContext to be submitted to the RM
-   * from the information provided by the user.
-   * 
-   * @param newApp the information provided by the user
-   * @return returns the constructed ApplicationSubmissionContext
-   * @throws IOException
-   */
-  protected ApplicationSubmissionContext createAppSubmissionContext(
-      ApplicationSubmissionContextInfo newApp) throws IOException {
-
-    // create local resources and app submission context
-
-    ApplicationId appid;
-    String error =
-        "Could not parse application id " + newApp.getApplicationId();
-    try {
-      appid = ApplicationId.fromString(newApp.getApplicationId());
-    } catch (Exception e) {
-      throw new BadRequestException(error);
-    }
-    ApplicationSubmissionContext appContext = ApplicationSubmissionContext
-        .newInstance(appid, newApp.getApplicationName(), newApp.getQueue(),
-            Priority.newInstance(newApp.getPriority()),
-            createContainerLaunchContext(newApp), newApp.getUnmanagedAM(),
-            newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(),
-            createAppSubmissionContextResource(newApp),
-            newApp.getApplicationType(),
-            newApp.getKeepContainersAcrossApplicationAttempts(),
-            newApp.getAppNodeLabelExpression(),
-            newApp.getAMContainerNodeLabelExpression());
-    appContext.setApplicationTags(newApp.getApplicationTags());
-    appContext.setAttemptFailuresValidityInterval(
-        newApp.getAttemptFailuresValidityInterval());
-    if (newApp.getLogAggregationContextInfo() != null) {
-      appContext.setLogAggregationContext(
-          createLogAggregationContext(newApp.getLogAggregationContextInfo()));
-    }
-    String reservationIdStr = newApp.getReservationId();
-    if (reservationIdStr != null && !reservationIdStr.isEmpty()) {
-      ReservationId reservationId =
-          ReservationId.parseReservationId(reservationIdStr);
-      appContext.setReservationID(reservationId);
-    }
-    return appContext;
-  }
-
-  protected Resource createAppSubmissionContextResource(
-      ApplicationSubmissionContextInfo newApp) throws BadRequestException {
-    if (newApp.getResource().getvCores() > rm.getConfig().getInt(
-        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)) {
-      String msg = "Requested more cores than configured max";
-      throw new BadRequestException(msg);
-    }
-    if (newApp.getResource().getMemorySize() > rm.getConfig().getInt(
-        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) {
-      String msg = "Requested more memory than configured max";
-      throw new BadRequestException(msg);
-    }
-    Resource r = Resource.newInstance(newApp.getResource().getMemorySize(),
-        newApp.getResource().getvCores());
-    return r;
-  }
-
-  /**
-   * Create the ContainerLaunchContext required for the
-   * ApplicationSubmissionContext. This function takes the user information and
-   * generates the ByteBuffer structures required by the ContainerLaunchContext
-   * 
-   * @param newApp the information provided by the user
-   * @return created context
-   * @throws BadRequestException
-   * @throws IOException
-   */
-  protected ContainerLaunchContext createContainerLaunchContext(
-      ApplicationSubmissionContextInfo newApp)
-      throws BadRequestException, IOException {
-
-    // create container launch context
-
-    HashMap<String, ByteBuffer> hmap = new HashMap<String, ByteBuffer>();
-    for (Map.Entry<String, String> entry : newApp
-        .getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) {
-      if (entry.getValue().isEmpty() == false) {
-        Base64 decoder = new Base64(0, null, true);
-        byte[] data = decoder.decode(entry.getValue());
-        hmap.put(entry.getKey(), ByteBuffer.wrap(data));
-      }
-    }
-
-    HashMap<String, LocalResource> hlr = new HashMap<String, LocalResource>();
-    for (Map.Entry<String, LocalResourceInfo> entry : newApp
-        .getContainerLaunchContextInfo().getResources().entrySet()) {
-      LocalResourceInfo l = entry.getValue();
-      LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()),
-          l.getType(), l.getVisibility(), l.getSize(), l.getTimestamp());
-      hlr.put(entry.getKey(), lr);
-    }
-
-    DataOutputBuffer out = new DataOutputBuffer();
-    Credentials cs = createCredentials(
-        newApp.getContainerLaunchContextInfo().getCredentials());
-    cs.writeTokenStorageToStream(out);
-    ByteBuffer tokens = ByteBuffer.wrap(out.getData());
-
-    ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr,
-        newApp.getContainerLaunchContextInfo().getEnvironment(),
-        newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens,
-        newApp.getContainerLaunchContextInfo().getAcls());
-
-    return ctx;
-  }
-
-  /**
-   * Generate a Credentials object from the information in the CredentialsInfo
-   * object.
-   * 
-   * @param credentials the CredentialsInfo provided by the user.
-   * @return
-   */
-  private Credentials createCredentials(CredentialsInfo credentials) {
-    Credentials ret = new Credentials();
-    try {
-      for (Map.Entry<String, String> entry : credentials.getTokens()
-          .entrySet()) {
-        Text alias = new Text(entry.getKey());
-        Token<TokenIdentifier> token = new Token<TokenIdentifier>();
-        token.decodeFromUrlString(entry.getValue());
-        ret.addToken(alias, token);
-      }
-      for (Map.Entry<String, String> entry : credentials.getSecrets()
-          .entrySet()) {
-        Text alias = new Text(entry.getKey());
-        Base64 decoder = new Base64(0, null, true);
-        byte[] secret = decoder.decode(entry.getValue());
-        ret.addSecretKey(alias, secret);
-      }
-    } catch (IOException ie) {
-      throw new BadRequestException(
-          "Could not parse credentials data; exception message = "
-              + ie.getMessage());
-    }
-    return ret;
-  }
-
   private UserGroupInformation createKerberosUserGroupInformation(
       HttpServletRequest hsr) throws AuthorizationException, YarnException {
 
@@ -1815,17 +1659,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     return callerUGI;
   }
 
-  private LogAggregationContext createLogAggregationContext(
-      LogAggregationContextInfo logAggregationContextInfo) {
-    return LogAggregationContext.newInstance(
-        logAggregationContextInfo.getIncludePattern(),
-        logAggregationContextInfo.getExcludePattern(),
-        logAggregationContextInfo.getRolledLogsIncludePattern(),
-        logAggregationContextInfo.getRolledLogsExcludePattern(),
-        logAggregationContextInfo.getLogAggregationPolicyClassName(),
-        logAggregationContextInfo.getLogAggregationPolicyParameters());
-  }
-
   @POST
   @Path(RMWSConsts.DELEGATION_TOKEN)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,




[08/50] [abbrv] hadoop git commit: HDFS-12143. Improve performance of getting and removing inode features. Contributed by Daryn Sharp.

Posted by xg...@apache.org.
HDFS-12143. Improve performance of getting and removing inode features. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a79dcfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a79dcfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a79dcfc

Branch: refs/heads/YARN-5734
Commit: 1a79dcfc457969d6a6c08ffffe4152fd7638e48a
Parents: cca51e9
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue Jul 25 10:28:57 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue Jul 25 10:28:57 2017 -0500

----------------------------------------------------------------------
 .../namenode/INodeWithAdditionalFields.java     | 24 ++++++++++++++------
 1 file changed, 17 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a79dcfc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index fe58577..9adcc3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -283,12 +283,14 @@ public abstract class INodeWithAdditionalFields extends INode
 
   protected void removeFeature(Feature f) {
     int size = features.length;
-    Preconditions.checkState(size > 0, "Feature "
-        + f.getClass().getSimpleName() + " not found.");
+    if (size == 0) {
+      throwFeatureNotFoundException(f);
+    }
 
     if (size == 1) {
-      Preconditions.checkState(features[0] == f, "Feature "
-          + f.getClass().getSimpleName() + " not found.");
+      if (features[0] != f) {
+        throwFeatureNotFoundException(f);
+      }
       features = EMPTY_FEATURE;
       return;
     }
@@ -307,14 +309,22 @@ public abstract class INodeWithAdditionalFields extends INode
       }
     }
 
-    Preconditions.checkState(!overflow && j == size - 1, "Feature "
-        + f.getClass().getSimpleName() + " not found.");
+    if (overflow || j != size - 1) {
+      throwFeatureNotFoundException(f);
+    }
     features = arr;
   }
 
+  private void throwFeatureNotFoundException(Feature f) {
+    throw new IllegalStateException(
+        "Feature " + f.getClass().getSimpleName() + " not found.");
+  }
+
   protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
     Preconditions.checkArgument(clazz != null);
-    for (Feature f : features) {
+    final int size = features.length;
+    for (int i=0; i < size; i++) {
+      Feature f = features[i];
       if (clazz.isAssignableFrom(f.getClass())) {
         @SuppressWarnings("unchecked")
         T ret = (T) f;
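
The shape of this change is worth making explicit: Java evaluates method
arguments eagerly, so the old Preconditions.checkState(cond, msg) calls paid
for getClass().getSimpleName() plus string concatenation on every
removeFeature() call, even when the check passed. The patch turns the happy
path into a plain branch and builds the message only on failure. A minimal
sketch of the two shapes, using illustrative stand-in types rather than the
real INode feature classes:

    import com.google.common.base.Preconditions;

    final class EagerMessageDemo {
      interface Feature {
      }

      // Before: the message string is concatenated on every call,
      // pass or fail, because checkState's arguments are evaluated
      // before the method runs.
      static void checkBefore(Feature[] features, Feature f) {
        Preconditions.checkState(features.length > 0 && features[0] == f,
            "Feature " + f.getClass().getSimpleName() + " not found.");
      }

      // After: the common case is a comparison with no allocation;
      // the string exists only on the failure path.
      static void checkAfter(Feature[] features, Feature f) {
        if (features.length == 0 || features[0] != f) {
          throw new IllegalStateException(
              "Feature " + f.getClass().getSimpleName() + " not found.");
        }
      }
    }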




[21/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
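
Background for the bulk rename in the diffs below: a lone underscore is a
legal identifier only through Java 8, and JDK 9 reserves "_" as a keyword, so
the hamlet package's element-closing method _() (and the _ content method)
stops compiling. The hamlet2 copy spells them __() and __. A before/after
fragment, assumed to sit inside an HtmlBlock render body with a made-up
selector and heading:

    // Before (org.apache.hadoop.yarn.webapp.hamlet.Hamlet):
    // fails to compile under JDK 9.
    html.div("#content").h1("Cluster Apps")._();

    // After (org.apache.hadoop.yarn.webapp.hamlet2.Hamlet):
    // identical element tree, underscore methods doubled.
    html.div("#content").h1("Cluster Apps").__();
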
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 40e1e94..82ddb54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
@@ -45,10 +45,10 @@ import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -86,7 +86,7 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
         .th(".resource", "ResourceName").th(".capacity", "Capability")
         .th(".containers", "NumContainers")
         .th(".relaxlocality", "RelaxLocality")
-        .th(".labelexpression", "NodeLabelExpression")._()._().tbody();
+        .th(".labelexpression", "NodeLabelExpression").__().__().tbody();
 
     StringBuilder resourceRequestTableData = new StringBuilder("[\n");
     for (ResourceRequestInfo resourceRequest  : resourceRequests) {
@@ -114,9 +114,9 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
     }
     resourceRequestTableData.append("]");
     html.script().$type("text/javascript")
-        ._("var resourceRequestsTableData=" + resourceRequestTableData)._();
-    tbody._()._();
-    div._();
+        .__("var resourceRequestsTableData=" + resourceRequestTableData).__();
+    tbody.__().__();
+    div.__();
   }
 
   private Resource getTotalResource(List<ResourceRequestInfo> requests) {
@@ -163,7 +163,7 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
         th(_TH, "Node Local Request").
         th(_TH, "Rack Local Request").
         th(_TH, "Off Switch Request").
-      _();
+        __();
 
     String[] containersType =
         { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
@@ -173,10 +173,10 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
       table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
         .td(String.valueOf(attemptMetrics.getLocalityStatistics()[i][0]))
         .td(i == 0 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][1]))
-        .td(i <= 1 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][2]))._();
+        .td(i <= 1 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][2])).__();
     }
-    table._();
-    div._();
+    table.__();
+    div.__();
   }
 
   private boolean isApplicationInFinalState(YarnApplicationAttemptState state) {
@@ -192,12 +192,12 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
       if (!isApplicationInFinalState(YarnApplicationAttemptState
           .valueOf(attempt.getAppAttemptState().toString()))) {
         RMAppAttemptMetrics metrics = attempt.getRMAppAttemptMetrics();
-        DIV<Hamlet> pdiv = html._(InfoBlock.class).div(_INFO_WRAP);
+        DIV<Hamlet> pdiv = html.__(InfoBlock.class).div(_INFO_WRAP);
         info("Application Attempt Overview").clear();
-        info("Application Attempt Metrics")._(
+        info("Application Attempt Metrics").__(
           "Application Attempt Headroom : ", metrics == null ? "N/A" :
             metrics.getApplicationAttemptHeadroom());
-        pdiv._();
+        pdiv.__();
       }
     }
   }
@@ -226,23 +226,23 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
           .getBlacklistUpdates().getBlacklistAdditions());
 
     info("Application Attempt Overview")
-      ._(
+      .__(
         "Application Attempt State:",
         appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
           .getAppAttemptState())
-        ._("Started:", Times.format(appAttempt.getStartedTime()))
-        ._("Elapsed:",
+        .__("Started:", Times.format(appAttempt.getStartedTime()))
+        .__("Elapsed:",
             org.apache.hadoop.util.StringUtils.formatTime(Times.elapsed(
                 appAttempt.getStartedTime(), appAttempt.getFinishedTime())))
-      ._(
+      .__(
         "AM Container:",
         appAttempt.getAmContainerId() == null || containers == null
             || !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
             ? null : root_url("container", appAttempt.getAmContainerId()),
         appAttempt.getAmContainerId() == null ? "N/A" :
           String.valueOf(appAttempt.getAmContainerId()))
-      ._("Node:", node)
-      ._(
+      .__("Node:", node)
+      .__(
         "Tracking URL:",
         appAttempt.getTrackingUrl() == null
             || appAttempt.getTrackingUrl().equals(UNAVAILABLE) ? null
@@ -254,12 +254,12 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
                 || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED
                 || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED
                 ? "History" : "ApplicationMaster")
-      ._(
+      .__(
         "Diagnostics Info:",
         appAttempt.getDiagnosticsInfo() == null ? "" : appAttempt
           .getDiagnosticsInfo())
-      ._("Nodes blacklisted by the application:", appBlacklistedNodes)
-      ._("Nodes blacklisted by the system:", rmBlackListedNodes);
+      .__("Nodes blacklisted by the application:", appBlacklistedNodes)
+      .__("Nodes blacklisted by the system:", rmBlackListedNodes);
   }
 
   private String getNodeString(Collection<String> nodes) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
index e5d6c16..cd04264 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptM
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.AppBlock;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -82,33 +82,33 @@ public class RMAppBlock extends AppBlock{
         attemptMetrics == null ? 0 : attemptMetrics
           .getNumNonAMContainersPreempted();
     DIV<Hamlet> pdiv = html.
-        _(InfoBlock.class).
+        __(InfoBlock.class).
         div(_INFO_WRAP);
     info("Application Overview").clear();
     info("Application Metrics")
-        ._("Total Resource Preempted:",
+        .__("Total Resource Preempted:",
           appMetrics == null ? "N/A" : appMetrics.getResourcePreempted())
-        ._("Total Number of Non-AM Containers Preempted:",
+        .__("Total Number of Non-AM Containers Preempted:",
           appMetrics == null ? "N/A"
               : appMetrics.getNumNonAMContainersPreempted())
-        ._("Total Number of AM Containers Preempted:",
+        .__("Total Number of AM Containers Preempted:",
           appMetrics == null ? "N/A"
               : appMetrics.getNumAMContainersPreempted())
-        ._("Resource Preempted from Current Attempt:",
+        .__("Resource Preempted from Current Attempt:",
           attemptResourcePreempted)
-        ._("Number of Non-AM Containers Preempted from Current Attempt:",
+        .__("Number of Non-AM Containers Preempted from Current Attempt:",
           attemptNumNonAMContainerPreempted)
-        ._("Aggregate Resource Allocation:",
+        .__("Aggregate Resource Allocation:",
           String.format("%d MB-seconds, %d vcore-seconds",
               appMetrics == null ? "N/A" : appMetrics.getMemorySeconds(),
               appMetrics == null ? "N/A" : appMetrics.getVcoreSeconds()))
-        ._("Aggregate Preempted Resource Allocation:",
+        .__("Aggregate Preempted Resource Allocation:",
           String.format("%d MB-seconds, %d vcore-seconds",
             appMetrics == null ? "N/A" : appMetrics.getPreemptedMemorySeconds(),
             appMetrics == null ? "N/A" :
                 appMetrics.getPreemptedVcoreSeconds()));
 
-    pdiv._();
+    pdiv.__();
   }
 
   @Override
@@ -122,7 +122,7 @@ public class RMAppBlock extends AppBlock{
             .th(".appBlacklistednodes", "Nodes blacklisted by the application",
                 "Nodes blacklisted by the app")
             .th(".rmBlacklistednodes", "Nodes blacklisted by the RM for the"
-                + " app", "Nodes blacklisted by the system")._()._().tbody();
+                + " app", "Nodes blacklisted by the system").__().__().tbody();
 
     RMApp rmApp = this.rm.getRMContext().getRMApps().get(this.appID);
     if (rmApp == null) {
@@ -174,9 +174,9 @@ public class RMAppBlock extends AppBlock{
     }
     attemptsTableData.append("]");
     html.script().$type("text/javascript")
-        ._("var attemptsTableData=" + attemptsTableData)._();
+        .__("var attemptsTableData=" + attemptsTableData).__();
 
-    tbody._()._();
+    tbody.__().__();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java
index f7f7c97..c1f2e5e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java
@@ -38,9 +38,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.util.Apps;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -87,28 +87,28 @@ public class RMAppLogAggregationStatusBlock extends HtmlBlock {
       tr().
         th(_TH, "Log Aggregation Status").
         th(_TH, "Description").
-      _();
+        __();
     table_description.tr().td(LogAggregationStatus.DISABLED.name())
-      .td("Log Aggregation is Disabled.")._();
+      .td("Log Aggregation is Disabled.").__();
     table_description.tr().td(LogAggregationStatus.NOT_START.name())
-      .td("Log Aggregation does not Start.")._();
+      .td("Log Aggregation does not Start.").__();
     table_description.tr().td(LogAggregationStatus.RUNNING.name())
-      .td("Log Aggregation is Running.")._();
+      .td("Log Aggregation is Running.").__();
     table_description.tr().td(LogAggregationStatus.RUNNING_WITH_FAILURE.name())
       .td("Log Aggregation is Running, but has failures "
-          + "in previous cycles")._();
+          + "in previous cycles").__();
     table_description.tr().td(LogAggregationStatus.SUCCEEDED.name())
       .td("Log Aggregation is Succeeded. All of the logs have been "
-          + "aggregated successfully.")._();
+          + "aggregated successfully.").__();
     table_description.tr().td(LogAggregationStatus.FAILED.name())
       .td("Log Aggregation is Failed. At least one of the logs "
-          + "have not been aggregated.")._();
+          + "have not been aggregated.").__();
     table_description.tr().td(LogAggregationStatus.TIME_OUT.name())
       .td("The application is finished, but the log aggregation status is "
           + "not updated for a long time. Not sure whether the log aggregation "
-          + "is finished or not.")._();
-    table_description._();
-    div_description._();
+          + "is finished or not.").__();
+    table_description.__();
+    div_description.__();
 
     RMApp rmApp = rm.getRMContext().getRMApps().get(appId);
     // Application Log aggregation status Table
@@ -131,7 +131,7 @@ public class RMAppLogAggregationStatusBlock extends HtmlBlock {
       .th(_TH, "Last "
           + maxLogAggregationDiagnosticsInMemory + " Diagnostic Messages")
       .th(_TH, "Last "
-          + maxLogAggregationDiagnosticsInMemory + " Failure Messages")._();
+          + maxLogAggregationDiagnosticsInMemory + " Failure Messages").__();
 
     if (rmApp != null) {
       Map<NodeId, LogAggregationReport> logAggregationReports =
@@ -152,11 +152,11 @@ public class RMAppLogAggregationStatusBlock extends HtmlBlock {
             .td(report.getKey().toString())
             .td(status == null ? "N/A" : status.toString())
             .td(message == null ? "N/A" : message)
-            .td(failureMessage == null ? "N/A" : failureMessage)._();
+            .td(failureMessage == null ? "N/A" : failureMessage).__();
         }
       }
     }
-    table._();
-    div._();
+    table.__();
+    div.__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
index 61674d2..ede71e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
@@ -35,11 +35,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.View;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 
 import com.google.inject.Inject;
 
@@ -72,8 +71,8 @@ public class RMAppsBlock extends AppsBlock {
           .th(".clusterPercentage", "% of Cluster")
           .th(".progress", "Progress")
           .th(".ui", "Tracking UI")
-          .th(".blacklisted", "Blacklisted Nodes")._()
-          ._().tbody();
+          .th(".blacklisted", "Blacklisted Nodes").__()
+          .__().tbody();
 
     StringBuilder appsTableData = new StringBuilder("[\n");
     for (ApplicationReport appReport : appReports) {
@@ -190,8 +189,8 @@ public class RMAppsBlock extends AppsBlock {
     }
     appsTableData.append("]");
     html.script().$type("text/javascript")
-      ._("var appsTableData=" + appsTableData)._();
+      .__("var appsTableData=" + appsTableData).__();
 
-    tbody._()._();
+    tbody.__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java
index 216deeb..c2ac59d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java
@@ -31,7 +31,7 @@ public class RMErrorsAndWarningsPage extends RmView {
   }
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     String title = "Errors and Warnings in the ResourceManager";
     setTitle(title);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java
index beb0cca..d81e64d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
  * because of a redirection issue.
  */
 public class RedirectionErrorPage extends RmView {
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     String aid = $(YarnWebParams.APPLICATION_ID);
 
     commonPreHead(html);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
index 1a437f8..fc844f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
@@ -33,7 +33,7 @@ public class RmView extends TwoColumnLayout {
   static final int MAX_FAST_ROWS = 1000;    // inline js array
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "apps");
     set(initID(DATATABLES, "apps"), initAppsTable());
@@ -45,7 +45,7 @@ public class RmView extends TwoColumnLayout {
     setTitle(sjoin(reqState, "Applications"));
   }
 
-  protected void commonPreHead(Page.HTML<_> html) {
+  protected void commonPreHead(Page.HTML<__> html) {
     set(ACCORDION_ID, "nav");
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java
index 99c0565..8c2d271 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java
@@ -27,7 +27,7 @@ public class SchedulerPageUtil {
     private void reopenQueue(Block html) {
       html.
           script().$type("text/javascript").
-          _("function reopenQueryNodes() {",
+          __("function reopenQueryNodes() {",
             "  var currentParam = window.location.href.split('?');",
             "  var tmpCurrentParam = currentParam;",
             "  var queryQueuesString = '';",
@@ -52,13 +52,13 @@ public class SchedulerPageUtil {
             "                  'open_node.jstree' :function(e, data) { storeExpandedQueue(e, data); },",
             "                  'close_node.jstree':function(e, data) { storeExpandedQueue(e, data); }",
             "  });",
-            "}")._();
+            "}").__();
     }
 
     private void storeExpandedQueue (Block html) {
       html.
           script().$type("text/javascript").
-          _("function storeExpandedQueue(e, data) {",
+          __("function storeExpandedQueue(e, data) {",
             "  var OPEN_QUEUES = 'openQueues';",
             "  var ACTION_OPEN = 'open';",
             "  var ACTION_CLOSED = 'closed';",
@@ -166,7 +166,7 @@ public class SchedulerPageUtil {
             "    queryString = queryString + '#' + queueName;",
             "  }",
             "  return queryString;",
-            "}")._();
+            "}").__();
     }
 
     @Override protected void render(Block html) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java
index 27944d3..cec085b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java
@@ -43,7 +43,7 @@ import com.google.inject.Inject;
 @Unstable
 public class SCMOverviewPage extends TwoColumnLayout {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     set(ACCORDION_ID, "nav");
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }
@@ -60,9 +60,9 @@ public class SCMOverviewPage extends TwoColumnLayout {
   static private class SCMOverviewNavBlock extends HtmlBlock {
     @Override
     protected void render(Block html) {
-      html.div("#nav").h3("Tools").ul().li().a("/conf", "Configuration")._()
-          .li().a("/stacks", "Thread dump")._().li().a("/logs", "Logs")._()
-          .li().a("/metrics", "Metrics")._()._()._();
+      html.div("#nav").h3("Tools").ul().li().a("/conf", "Configuration").__()
+          .li().a("/stacks", "Thread dump").__().li().a("/logs", "Logs").__()
+          .li().a("/metrics", "Metrics").__().__().__();
     }
   }
 
@@ -81,15 +81,15 @@ public class SCMOverviewPage extends TwoColumnLayout {
           CleanerMetrics.getInstance(), ClientSCMMetrics.getInstance(),
               SharedCacheUploaderMetrics.getInstance());
       info("Shared Cache Manager overview").
-          _("Started on:", Times.format(scm.getStartTime())).
-          _("Cache hits: ", metricsInfo.getCacheHits()).
-          _("Cache misses: ", metricsInfo.getCacheMisses()).
-          _("Cache releases: ", metricsInfo.getCacheReleases()).
-          _("Accepted uploads: ", metricsInfo.getAcceptedUploads()).
-          _("Rejected uploads: ", metricsInfo.getRejectUploads()).
-          _("Deleted files by the cleaner: ", metricsInfo.getTotalDeletedFiles()).
-          _("Processed files by the cleaner: ", metricsInfo.getTotalProcessedFiles());
-      html._(InfoBlock.class);
+          __("Started on:", Times.format(scm.getStartTime())).
+          __("Cache hits: ", metricsInfo.getCacheHits()).
+          __("Cache misses: ", metricsInfo.getCacheMisses()).
+          __("Cache releases: ", metricsInfo.getCacheReleases()).
+          __("Accepted uploads: ", metricsInfo.getAcceptedUploads()).
+          __("Rejected uploads: ", metricsInfo.getRejectUploads()).
+          __("Deleted files by the cleaner: ", metricsInfo.getTotalDeletedFiles()).
+          __("Processed files by the cleaner: ", metricsInfo.getTotalProcessedFiles());
+      html.__(InfoBlock.class);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
index 7d61f74..4886c55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.webproxy;
 
 import org.apache.hadoop.yarn.webapp.MimeType;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,7 +41,7 @@ public class ProxyUtils {
       "This filter only works for HTTP/HTTPS";
   public static final String LOCATION = "Location";
 
-  public static class _ implements Hamlet._ {
+  public static class __ implements Hamlet.__ {
     //Empty
   }
 
@@ -50,7 +50,7 @@ public class ProxyUtils {
       super(out, 0, false);
     }
 
-    public HTML<ProxyUtils._> html() {
+    public HTML<ProxyUtils.__> html() {
       return new HTML<>("html", null, EnumSet.of(EOpt.ENDTAG));
     }
   }
@@ -86,13 +86,13 @@ public class ProxyUtils {
     PrintWriter writer = response.getWriter();
     Page p = new Page(writer);
     p.html()
-        .head().title("Moved")._()
+        .head().title("Moved").__()
         .body()
         .h1("Moved")
         .div()
-          ._("Content has moved ")
-          .a(location, "here")._()
-        ._()._();
+          .__("Content has moved ")
+          .a(location, "here").__()
+        .__().__();
     writer.close();
   }
 
@@ -110,7 +110,7 @@ public class ProxyUtils {
     Page p = new Page(resp.getWriter());
     p.html().
         h1(message).
-         _();
+        __();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index b32ee30..e1588c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.util.TrackingUriPlugin;
 import org.apache.hadoop.yarn.webapp.MimeType;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.http.Header;
 import org.apache.http.HttpResponse;
@@ -108,7 +108,7 @@ public class WebAppProxyServlet extends HttpServlet {
   /**
    * Empty Hamlet class.
    */
-  private static class _ implements Hamlet._ {
+  private static class __ implements Hamlet.__ {
     //Empty
   }
   
@@ -117,7 +117,7 @@ public class WebAppProxyServlet extends HttpServlet {
       super(out, 0, false);
     }
   
-    public HTML<WebAppProxyServlet._> html() {
+    public HTML<WebAppProxyServlet.__> html() {
       return new HTML<>("html", null, EnumSet.of(EOpt.ENDTAG));
     }
   }
@@ -172,10 +172,10 @@ public class WebAppProxyServlet extends HttpServlet {
     p.html().
       h1("WARNING: The following page may not be safe!").
       h3().
-      _("click ").a(link, "here").
-      _(" to continue to an Application Master web interface owned by ", user).
-      _().
-    _();
+        __("click ").a(link, "here").
+        __(" to continue to an Application Master web interface owned by ", user).
+        __().
+        __();
   }
   
   /**
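
All of the hunks in this message apply one mechanical rewrite: org.apache.hadoop.yarn.webapp.hamlet becomes hamlet2, and every identifier spelled "_" becomes "__" -- presumably to stay compilable on Java 9, where a bare underscore becomes a reserved keyword and stops being a legal identifier. An illustrative sketch of the resulting style, modeled on the nav blocks above (the class name is mine, not from the patch):

import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

public class NavExampleBlock extends HtmlBlock {
  @Override
  protected void render(Block html) {
    html.div("#nav").h3("Tools")
        .ul()
          .li().a("/conf", "Configuration").__()  // closes the <li>; was ._() in hamlet
          .li().a("/logs", "Logs").__()
        .__()                                     // closes the <ul>
        .__();                                    // closes the <div>
  }
}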




[32/50] [abbrv] hadoop git commit: YARN-6802. Add max AM resource and AM resource usage to leaf queue view in FairScheduler WebUI. (YunFan Zhou via Yufei Gu)

Posted by xg...@apache.org.
YARN-6802. Add max AM resource and AM resource usage to leaf queue view in FairScheduler WebUI. (YunFan Zhou via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ea01fd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ea01fd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ea01fd9

Branch: refs/heads/YARN-5734
Commit: 9ea01fd956b7027fa28fbed07f57d0c9c460c283
Parents: f735ad1
Author: Yufei Gu <yu...@apache.org>
Authored: Fri Jul 28 09:51:06 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Fri Jul 28 09:52:53 2017 -0700

----------------------------------------------------------------------
 .../webapp/FairSchedulerPage.java               |  2 ++
 .../webapp/dao/FairSchedulerQueueInfo.java      | 23 ++++++++++++++++++++
 2 files changed, 25 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ea01fd9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
index ffa4594..ef417d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
@@ -72,6 +72,8 @@ public class FairSchedulerPage extends RmView {
       ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status").
           __("Used Resources:", qinfo.getUsedResources().toString()).
           __("Demand Resources:", qinfo.getDemandResources().toString()).
+          __("AM Used Resources:", qinfo.getAMUsedResources().toString()).
+          __("AM Max Resources:", qinfo.getAMMaxResources().toString()).
           __("Num Active Applications:", qinfo.getNumActiveApplications()).
           __("Num Pending Applications:", qinfo.getNumPendingApplications()).
           __("Min Resources:", qinfo.getMinResources().toString()).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ea01fd9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index fa14bae..a4607c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -28,6 +28,7 @@ import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlSeeAlso;
 import javax.xml.bind.annotation.XmlTransient;
 
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
@@ -54,6 +55,8 @@ public class FairSchedulerQueueInfo {
   private ResourceInfo minResources;
   private ResourceInfo maxResources;
   private ResourceInfo usedResources;
+  private ResourceInfo amUsedResources;
+  private ResourceInfo amMaxResources;
   private ResourceInfo demandResources;
   private ResourceInfo steadyFairResources;
   private ResourceInfo fairResources;
@@ -82,6 +85,12 @@ public class FairSchedulerQueueInfo {
     
     clusterResources = new ResourceInfo(scheduler.getClusterResource());
     
+    amUsedResources = new ResourceInfo(Resource.newInstance(
+        queue.getMetrics().getAMResourceUsageMB(),
+        queue.getMetrics().getAMResourceUsageVCores()));
+    amMaxResources = new ResourceInfo(Resource.newInstance(
+        queue.getMetrics().getMaxAMShareMB(),
+        queue.getMetrics().getMaxAMShareVCores()));
     usedResources = new ResourceInfo(queue.getResourceUsage());
     demandResources = new ResourceInfo(queue.getDemand());
     fractionMemUsed = (float)usedResources.getMemorySize() /
@@ -205,6 +214,20 @@ public class FairSchedulerQueueInfo {
   }
 
   /**
+   * @return the am used resource of this queue.
+   */
+  public ResourceInfo getAMUsedResources() {
+    return amUsedResources;
+  }
+
+  /**
+   * @return the am max resource of this queue.
+   */
+  public ResourceInfo getAMMaxResources() {
+    return amMaxResources;
+  }
+
+  /**
    * @return the demand resource of this queue.
      */
   public ResourceInfo getDemandResources() {
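
The two new dao fields are assembled by pairing the MB and vcore gauges from the queue's metrics into a single Resource, as the constructor hunk above shows. A stand-alone sketch of that pairing (the literal values are assumptions; the patch reads them from queue.getMetrics()):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;

public class AmResourceInfoSketch {
  public static void main(String[] args) {
    long amUsedMb = 2048L;  // assumed value; getAMResourceUsageMB() in the patch
    int amUsedVcores = 2;   // assumed value; getAMResourceUsageVCores() in the patch
    ResourceInfo amUsed =
        new ResourceInfo(Resource.newInstance(amUsedMb, amUsedVcores));
    System.out.println("AM Used Resources: " + amUsed);
  }
}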




[44/50] [abbrv] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

Posted by xg...@apache.org.
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/512f5c95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/512f5c95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/512f5c95

Branch: refs/heads/YARN-5734
Commit: 512f5c95994c3018b04f3af9a3fad8428480e8f5
Parents: 0fd6d0f
Author: Jonathan Hung <jh...@linkedin.com>
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 31 08:54:54 2017 -0700

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java   | 36 +++++------
 .../CapacitySchedulerConfiguration.java         | 10 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++++++++++++++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 ++++++++++++++++++++
 .../scheduler/capacity/conf/package-info.java   | 29 +++++++++
 .../capacity/TestCapacityScheduler.java         |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/512f5c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 2ccaf63..a6feb09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -105,6 +104,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -163,6 +164,7 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private CSConfigurationProvider csConfProvider;
 
   @Override
   public void setConf(Configuration conf) {
@@ -286,7 +288,18 @@ public class CapacityScheduler extends
       IOException {
     try {
       writeLock.lock();
-      this.conf = loadCapacitySchedulerConfiguration(configuration);
+      String confProviderStr = configuration.get(
+          CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+          CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+      if (confProviderStr.equals(
+          CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+        this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+      } else {
+        throw new IOException("Invalid CS configuration provider: " +
+            confProviderStr);
+      }
+      this.csConfProvider.init(configuration);
+      this.conf = this.csConfProvider.loadConfiguration(configuration);
       validateConf(this.conf);
       this.minimumAllocation = this.conf.getMinimumAllocation();
       initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -393,7 +406,7 @@ public class CapacityScheduler extends
       writeLock.lock();
       Configuration configuration = new Configuration(newConf);
       CapacitySchedulerConfiguration oldConf = this.conf;
-      this.conf = loadCapacitySchedulerConfiguration(configuration);
+      this.conf = csConfProvider.loadConfiguration(configuration);
       validateConf(this.conf);
       try {
         LOG.info("Re-initializing queues...");
@@ -1777,23 +1790,6 @@ public class CapacityScheduler extends
     return true;
   }
 
-  private CapacitySchedulerConfiguration loadCapacitySchedulerConfiguration(
-      Configuration configuration) throws IOException {
-    try {
-      InputStream CSInputStream =
-          this.rmContext.getConfigurationProvider()
-              .getConfigurationInputStream(configuration,
-                  YarnConfiguration.CS_CONFIGURATION_FILE);
-      if (CSInputStream != null) {
-        configuration.addResource(CSInputStream);
-        return new CapacitySchedulerConfiguration(configuration, false);
-      }
-      return new CapacitySchedulerConfiguration(configuration, true);
-    } catch (Exception e) {
-      throw new IOException(e);
-    }
-  }
-
   private String getDefaultReservationQueueName(String planQueueName) {
     return planQueueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/512f5c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 1e29d50..ac1a1d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -301,6 +301,16 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   @Private
   public static final boolean DEFAULT_LAZY_PREEMPTION_ENABLED = false;
 
+  @Private
+  public static final String CS_CONF_PROVIDER = PREFIX
+      + "configuration.provider";
+
+  @Private
+  public static final String FILE_CS_CONF_PROVIDER = "file";
+
+  @Private
+  public static final String DEFAULT_CS_CONF_PROVIDER = FILE_CS_CONF_PROVIDER;
+
   AppPriorityACLConfigurationParser priorityACLConfig = new AppPriorityACLConfigurationParser();
 
   public CapacitySchedulerConfiguration() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/512f5c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
new file mode 100644
index 0000000..c9984ac
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+
+import java.io.IOException;
+
+/**
+ * Configuration provider for {@link CapacityScheduler}.
+ */
+public interface CSConfigurationProvider {
+
+  /**
+   * Initialize the configuration provider with given conf.
+   * @param conf configuration to initialize with
+   */
+  void init(Configuration conf);
+
+  /**
+   * Loads capacity scheduler configuration object.
+   * @param conf initial bootstrap configuration
+   * @return CS configuration
+   * @throws IOException if fail to retrieve configuration
+   */
+  CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
+      throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/512f5c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
new file mode 100644
index 0000000..51c64fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * {@link CapacityScheduler} configuration provider based on local
+ * {@code capacity-scheduler.xml} file.
+ */
+public class FileBasedCSConfigurationProvider implements
+    CSConfigurationProvider {
+
+  private RMContext rmContext;
+
+  /**
+   * Construct file based CS configuration provider with given context.
+   * @param rmContext the RM context
+   */
+  public FileBasedCSConfigurationProvider(RMContext rmContext) {
+    this.rmContext = rmContext;
+  }
+
+  @Override
+  public void init(Configuration conf) {}
+
+  @Override
+  public CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
+      throws IOException {
+    try {
+      InputStream csInputStream =
+          this.rmContext.getConfigurationProvider()
+              .getConfigurationInputStream(conf,
+                  YarnConfiguration.CS_CONFIGURATION_FILE);
+      if (csInputStream != null) {
+        conf.addResource(csInputStream);
+        return new CapacitySchedulerConfiguration(conf, false);
+      }
+      return new CapacitySchedulerConfiguration(conf, true);
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/512f5c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
new file mode 100644
index 0000000..08d0522
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package
+ * org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf
+ * contains classes related to capacity scheduler configuration management.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/512f5c95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 0642cd9..b8af469 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -236,13 +236,13 @@ public class TestCapacityScheduler {
 
   @Test (timeout = 30000)
   public void testConfValidation() throws Exception {
-    ResourceScheduler scheduler = new CapacityScheduler();
+    CapacityScheduler scheduler = new CapacityScheduler();
     scheduler.setRMContext(resourceManager.getRMContext());
     Configuration conf = new YarnConfiguration();
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
     try {
-      scheduler.reinitialize(conf, mockContext);
+      scheduler.init(conf);
       fail("Exception is expected because the min memory allocation is" +
         " larger than the max memory allocation.");
     } catch (YarnRuntimeException e) {
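
The CSConfigurationProvider contract is deliberately small -- init(Configuration) plus loadConfiguration(Configuration) -- so alternative backends can later sit behind the new CS_CONF_PROVIDER switch consulted in the provider-selection block above. A hypothetical provider (not part of this patch) that serves the bootstrap configuration unchanged, just to show the shape of an implementation:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;

public class PassThroughCSConfigurationProvider
    implements CSConfigurationProvider {

  @Override
  public void init(Configuration conf) {
    // nothing to bootstrap in this sketch
  }

  @Override
  public CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
      throws IOException {
    // 'false' mirrors the branch in the file-based provider above where
    // conf already carries the scheduler settings, so capacity-scheduler.xml
    // is not pulled in as an extra resource.
    return new CapacitySchedulerConfiguration(conf, false);
  }
}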




[29/50] [abbrv] hadoop git commit: YARN-6864. FSPreemptionThread cleanup for readability. (Daniel Templeton via Yufei Gu)

Posted by xg...@apache.org.
YARN-6864. FSPreemptionThread cleanup for readability. (Daniel Templeton via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9902be72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9902be72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9902be72

Branch: refs/heads/YARN-5734
Commit: 9902be72cbf7a170caa5cb1f13c227d881a39064
Parents: 38c6fa5
Author: Yufei Gu <yu...@apache.org>
Authored: Thu Jul 27 23:19:39 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Thu Jul 27 23:19:39 2017 -0700

----------------------------------------------------------------------
 .../scheduler/fair/FSPreemptionThread.java      | 28 ++++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9902be72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index efe36a6..b3e59c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
@@ -66,11 +66,11 @@ class FSPreemptionThread extends Thread {
     schedulerReadLock = scheduler.getSchedulerReadLock();
   }
 
+  @Override
   public void run() {
     while (!Thread.interrupted()) {
-      FSAppAttempt starvedApp;
-      try{
-        starvedApp = context.getStarvedApps().take();
+      try {
+        FSAppAttempt starvedApp = context.getStarvedApps().take();
         // Hold the scheduler readlock so this is not concurrent with the
         // update thread.
         schedulerReadLock.lock();
@@ -82,7 +82,7 @@ class FSPreemptionThread extends Thread {
         starvedApp.preemptionTriggered(delayBeforeNextStarvationCheck);
       } catch (InterruptedException e) {
         LOG.info("Preemption thread interrupted! Exiting.");
-        return;
+        Thread.currentThread().interrupt();
       }
     }
   }
@@ -112,16 +112,19 @@ class FSPreemptionThread extends Thread {
         PreemptableContainers bestContainers = null;
         List<FSSchedulerNode> potentialNodes = scheduler.getNodeTracker()
             .getNodesByResourceName(rr.getResourceName());
+        int maxAMContainers = Integer.MAX_VALUE;
+
         for (FSSchedulerNode node : potentialNodes) {
-          int maxAMContainers = bestContainers == null ?
-              Integer.MAX_VALUE : bestContainers.numAMContainers;
           PreemptableContainers preemptableContainers =
               identifyContainersToPreemptOnNode(
                   rr.getCapability(), node, maxAMContainers);
+
           if (preemptableContainers != null) {
             // This set is better than any previously identified set.
             bestContainers = preemptableContainers;
-            if (preemptableContainers.numAMContainers == 0) {
+            maxAMContainers = bestContainers.numAMContainers;
+
+            if (maxAMContainers == 0) {
               break;
             }
           }
@@ -182,13 +185,10 @@ class FSPreemptionThread extends Thread {
         return preemptableContainers;
       }
     }
-    return null;
-  }
 
-  private boolean isNodeAlreadyReserved(
-      FSSchedulerNode node, FSAppAttempt app) {
-    FSAppAttempt nodeReservedApp = node.getReservedAppSchedulable();
-    return nodeReservedApp != null && !nodeReservedApp.equals(app);
+    // Return null if the sum of all preemptable containers' resources
+    // isn't enough to satisfy the starved request.
+    return null;
   }
 
   private void trackPreemptionsAgainstNode(List<RMContainer> containers,
@@ -214,7 +214,7 @@ class FSPreemptionThread extends Thread {
   }
 
   private class PreemptContainersTask extends TimerTask {
-    private List<RMContainer> containers;
+    private final List<RMContainer> containers;
 
     PreemptContainersTask(List<RMContainer> containers) {
       this.containers = containers;
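
The rewritten candidate-selection loop above is a tighten-the-bound search: maxAMContainers now lives outside the node loop and ratchets down whenever a better candidate set is found, with an early exit once an AM-free set (bound 0) appears. A toy stand-alone version of the same shape (all names here are mine):

import java.util.Arrays;
import java.util.List;

public class TightenBoundSearch {
  // Stands in for identifyContainersToPreemptOnNode(): succeeds only if it
  // can beat the current bound, otherwise reports null.
  static Integer tryOnNode(int amContainers, int bound) {
    return amContainers < bound ? amContainers : null;
  }

  public static void main(String[] args) {
    List<Integer> nodes = Arrays.asList(3, 1, 4, 0, 2);
    int bound = Integer.MAX_VALUE;  // maxAMContainers in the patch
    Integer best = null;
    for (int n : nodes) {
      Integer found = tryOnNode(n, bound);
      if (found != null) {
        best = found;   // better than any previously identified set
        bound = found;  // later sets must preempt fewer AM containers
        if (bound == 0) {
          break;        // an AM-free set cannot be improved on
        }
      }
    }
    System.out.println("best = " + best);  // prints: best = 0
  }
}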




[02/50] [abbrv] hadoop git commit: YARN-6845. Variable scheduler of FSLeafQueue duplicates the one of its parent FSQueue. (Contributed by Yufei Gu via Daniel Templeton)

Posted by xg...@apache.org.
YARN-6845. Variable scheduler of FSLeafQueue duplicates the one of its parent FSQueue.
(Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10583625
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10583625
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10583625

Branch: refs/heads/YARN-5734
Commit: 10583625c1c803fc243adf6479cb9435af7e72da
Parents: bb30bd3
Author: Daniel Templeton <te...@apache.org>
Authored: Mon Jul 24 13:44:00 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Mon Jul 24 13:44:00 2017 -0700

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java    | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10583625/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 1de0e30..b911a1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -52,7 +52,6 @@ public class FSLeafQueue extends FSQueue {
   private static final Log LOG = LogFactory.getLog(FSLeafQueue.class.getName());
   private static final List<FSQueue> EMPTY_LIST = Collections.emptyList();
 
-  private FairScheduler scheduler;
   private FSContext context;
 
   // apps that are runnable
@@ -76,7 +75,6 @@ public class FSLeafQueue extends FSQueue {
   public FSLeafQueue(String name, FairScheduler scheduler,
       FSParentQueue parent) {
     super(name, scheduler, parent);
-    this.scheduler = scheduler;
     this.context = scheduler.getContext();
     this.lastTimeAtMinShare = scheduler.getClock().getTime();
     activeUsersManager = new ActiveUsersManager(getMetrics());
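
The removed field was redundant rather than merely untidy: redeclaring scheduler in FSLeafQueue hides the field already inherited from FSQueue, so the class ended up holding two references to the same scheduler object. A minimal illustration of Java's field-hiding semantics (names are illustrative):

public class FieldShadowing {
  static class Parent {
    protected String name = "parent";
  }

  static class Child extends Parent {
    private String name = "child";            // hides, not overrides, Parent.name
    String viaChild()  { return name; }       // "child"
    String viaParent() { return super.name; } // "parent"
  }

  public static void main(String[] args) {
    Child c = new Child();
    System.out.println(c.viaChild() + " / " + c.viaParent()); // child / parent
  }
}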




[18/50] [abbrv] hadoop git commit: HADOOP-14692. Upgrade Apache Rat

Posted by xg...@apache.org.
HADOOP-14692. Upgrade Apache Rat

Signed-off-by: Anu Engineer <ae...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f4808ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f4808ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f4808ce

Branch: refs/heads/YARN-5734
Commit: 5f4808ce73a373e646ce324b0037dca54e8adc1e
Parents: c4a85c6
Author: Allen Wittenauer <aw...@apache.org>
Authored: Thu Jul 27 13:04:32 2017 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Thu Jul 27 13:04:50 2017 -0700

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f4808ce/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e7f6eeb..29524a4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -102,7 +102,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     <maven-gpg-plugin.version>1.5</maven-gpg-plugin.version>
     <maven-remote-resources-plugin.version>1.5</maven-remote-resources-plugin.version>
     <maven-resources-plugin.version>3.0.1</maven-resources-plugin.version>
-    <apache-rat-plugin.version>0.10</apache-rat-plugin.version>
+    <apache-rat-plugin.version>0.12</apache-rat-plugin.version>
     <wagon-ssh.version>1.0</wagon-ssh.version>
     <maven-clover2-plugin.version>3.3.0</maven-clover2-plugin.version>
     <maven-bundle-plugin.version>2.5.0</maven-bundle-plugin.version>
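
For anyone validating the bump locally: the plugin's audit goal is unchanged, so running "mvn apache-rat:check" from the project root still drives the license check, now with release 0.12 picked up from the property above.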




[46/50] [abbrv] hadoop git commit: YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store

Posted by xg...@apache.org.
YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac11f497
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac11f497
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac11f497

Branch: refs/heads/YARN-5734
Commit: ac11f4978137447631a0531849c01010be65d3a5
Parents: ad0caa2
Author: Jonathan Hung <jh...@linkedin.com>
Authored: Wed Mar 1 16:03:01 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 31 08:57:36 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  6 ++
 .../src/main/resources/yarn-default.xml         | 12 +++
 .../scheduler/MutableConfigurationProvider.java | 35 ++++++++
 .../scheduler/capacity/CapacityScheduler.java   | 14 ++-
 .../CapacitySchedulerConfiguration.java         |  3 +
 .../capacity/conf/CSConfigurationProvider.java  |  3 +-
 .../conf/MutableCSConfigurationProvider.java    | 94 ++++++++++++++++++++
 .../conf/YarnConfigurationStoreFactory.java     | 46 ++++++++++
 .../TestMutableCSConfigurationProvider.java     | 83 +++++++++++++++++
 9 files changed, 291 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 93437e3..ce413f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -621,6 +621,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
       "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
+      YARN_PREFIX + "scheduler.configuration.store.class";
+  public static final String MEMORY_CONFIGURATION_STORE = "memory";
+  public static final String DEFAULT_CONFIGURATION_STORE =
+      MEMORY_CONFIGURATION_STORE;
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
       + "authorization-provider";
   private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 7ddcfcd..74ff747 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3136,4 +3136,16 @@
     <value>user-group</value>
   </property>
 
+  <property>
+    <description>
+      The type of configuration store to use for storing scheduler
+      configurations, if using a mutable configuration provider.
+      Keywords such as "memory" map to certain configuration store
+      implementations. If the keyword is not recognized, the value is
+      loaded as a class name.
+    </description>
+    <name>yarn.scheduler.configuration.store.class</name>
+    <value>memory</value>
+  </property>
+
 </configuration>
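
For readers wiring this up by hand, a minimal sketch of selecting a store
implementation through this property, using the constants added in this
commit (the commented-out custom class name is hypothetical; only the
"memory" keyword is defined here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class StoreConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Select the built-in in-memory store via its keyword.
    conf.set(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
        YarnConfiguration.MEMORY_CONFIGURATION_STORE);
    // Any value that is not a known keyword is treated as a class name
    // and loaded reflectively (hypothetical class, for illustration):
    // conf.set(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
    //     "org.example.MyConfigurationStore");
    System.out.println(conf.get(
        YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS));
  }
}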

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
new file mode 100644
index 0000000..da30a2b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.Map;
+
+/**
+ * Interface that allows changing the scheduler configuration.
+ */
+public interface MutableConfigurationProvider {
+
+  /**
+   * Update the scheduler configuration with the provided key value pairs.
+   * @param user User issuing the request.
+   * @param confUpdate Key-value pairs for configurations to be updated.
+   */
+  void mutateConfiguration(String user, Map<String, String> confUpdate);
+
+}
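
As a rough illustration of this contract, a minimal sketch of an
implementation that just applies the updates to a plain Configuration
(the InMemoryProvider name is hypothetical; MutableCSConfigurationProvider,
added below, is the real implementation in this commit):

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;

// Hypothetical, simplified implementation for illustration only.
public class InMemoryProvider implements MutableConfigurationProvider {
  private final Configuration conf = new Configuration(false);

  @Override
  public void mutateConfiguration(String user, Map<String, String> confUpdate) {
    // A real provider would authorize 'user' and persist the mutation;
    // this sketch simply applies the key-value pairs in memory.
    for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
      conf.set(kv.getKey(), kv.getValue());
    }
  }
}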

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index a6feb09..ca6e872 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -106,6 +106,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.MutableCSConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -291,10 +292,15 @@ public class CapacityScheduler extends
       String confProviderStr = configuration.get(
           CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
           CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
-      if (confProviderStr.equals(
-          CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
-        this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
-      } else {
+      switch (confProviderStr) {
+      case CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER:
+        this.csConfProvider =
+            new FileBasedCSConfigurationProvider(rmContext);
+        break;
+      case CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER:
+        this.csConfProvider = new MutableCSConfigurationProvider(rmContext);
+        break;
+      default:
         throw new IOException("Invalid CS configuration provider: " +
             confProviderStr);
       }
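
A sketch of opting in to the new provider from client configuration, using
the constants this commit adds (anything other than the "file" or "store"
keywords now fails fast with an IOException):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

public class ProviderSelectionExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Choose the mutable, store-backed provider instead of the
    // default file-based one.
    conf.set(CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
        CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER);
  }
}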

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index ac1a1d9..f7f7ac7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -309,6 +309,9 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   public static final String FILE_CS_CONF_PROVIDER = "file";
 
   @Private
+  public static final String STORE_CS_CONF_PROVIDER = "store";
+
+  @Private
   public static final String DEFAULT_CS_CONF_PROVIDER = FILE_CS_CONF_PROVIDER;
 
   AppPriorityACLConfigurationParser priorityACLConfig = new AppPriorityACLConfigurationParser();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
index c9984ac..0d2c8bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
@@ -32,8 +32,9 @@ public interface CSConfigurationProvider {
   /**
    * Initialize the configuration provider with given conf.
    * @param conf configuration to initialize with
+   * @throws IOException if initialization fails due to misconfiguration
    */
-  void init(Configuration conf);
+  void init(Configuration conf) throws IOException;
 
   /**
    * Loads capacity scheduler configuration object.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
new file mode 100644
index 0000000..267ab6a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * CS configuration provider which implements
+ * {@link MutableConfigurationProvider} for modifying capacity scheduler
+ * configuration.
+ */
+public class MutableCSConfigurationProvider implements CSConfigurationProvider,
+    MutableConfigurationProvider {
+
+  private Configuration schedConf;
+  private YarnConfigurationStore confStore;
+  private RMContext rmContext;
+  private Configuration conf;
+
+  public MutableCSConfigurationProvider(RMContext rmContext) {
+    this.rmContext = rmContext;
+  }
+
+  @Override
+  public void init(Configuration config) throws IOException {
+    String store = config.get(
+        YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
+        YarnConfiguration.DEFAULT_CONFIGURATION_STORE);
+    switch (store) {
+    case YarnConfiguration.MEMORY_CONFIGURATION_STORE:
+      this.confStore = new InMemoryConfigurationStore();
+      break;
+    default:
+      this.confStore = YarnConfigurationStoreFactory.getStore(config);
+      break;
+    }
+    Configuration initialSchedConf = new Configuration(false);
+    initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
+    this.schedConf = initialSchedConf;
+    confStore.initialize(config, initialSchedConf);
+    this.conf = config;
+  }
+
+  @Override
+  public CapacitySchedulerConfiguration loadConfiguration(Configuration
+      configuration) throws IOException {
+    Configuration loadedConf = new Configuration(configuration);
+    loadedConf.addResource(schedConf);
+    return new CapacitySchedulerConfiguration(loadedConf, false);
+  }
+
+  @Override
+  public void mutateConfiguration(String user,
+      Map<String, String> confUpdate) {
+    Configuration oldConf = new Configuration(schedConf);
+    LogMutation log = new LogMutation(confUpdate, user);
+    long id = confStore.logMutation(log);
+    for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
+      schedConf.set(kv.getKey(), kv.getValue());
+    }
+    try {
+      rmContext.getScheduler().reinitialize(conf, rmContext);
+    } catch (IOException e) {
+      schedConf = oldConf;
+      confStore.confirmMutation(id, false);
+      return;
+    }
+    confStore.confirmMutation(id, true);
+  }
+}
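
mutateConfiguration() above follows a write-ahead pattern: log the mutation,
apply it in memory, try to reinitialize the scheduler, then confirm or roll
back. A standalone sketch of the same control flow against generic
interfaces (all names here are illustrative stand-ins, not part of the patch):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Illustrative stand-ins for YarnConfigurationStore and the scheduler.
public class MutationFlowSketch {
  interface Store {
    long log(Map<String, String> update);
    void confirm(long id, boolean ok);
  }
  interface Scheduler {
    void reinitialize() throws IOException;
  }

  static void mutate(Store store, Scheduler sched,
      Map<String, String> current, Map<String, String> update) {
    Map<String, String> backup = new HashMap<>(current);
    long id = store.log(update);      // 1. persist the intent first
    current.putAll(update);           // 2. apply in memory
    try {
      sched.reinitialize();           // 3. validate by reinitializing
    } catch (IOException e) {
      current.clear();                // 4a. roll back on failure
      current.putAll(backup);
      store.confirm(id, false);
      return;
    }
    store.confirm(id, true);          // 4b. commit on success
  }
}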

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
new file mode 100644
index 0000000..60249c8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Factory class for creating instances of {@link YarnConfigurationStore}.
+ */
+public final class YarnConfigurationStoreFactory {
+
+  private static final Log LOG = LogFactory.getLog(
+      YarnConfigurationStoreFactory.class);
+
+  private YarnConfigurationStoreFactory() {
+    // Unused.
+  }
+
+  public static YarnConfigurationStore getStore(Configuration conf) {
+    Class<? extends YarnConfigurationStore> storeClass =
+        conf.getClass(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
+            InMemoryConfigurationStore.class, YarnConfigurationStore.class);
+    LOG.info("Using YarnConfigurationStore implementation - " + storeClass);
+    return ReflectionUtils.newInstance(storeClass, conf);
+  }
+}
\ No newline at end of file
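
Usage of the factory is a one-liner; a minimal sketch, assuming a Hadoop
Configuration is at hand (the factory falls back to
InMemoryConfigurationStore when the store class property is unset):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStoreFactory;

public class StoreFactoryExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    YarnConfigurationStore store =
        YarnConfigurationStoreFactory.getStore(conf);
    System.out.println("Loaded store: " + store.getClass().getName());
  }
}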

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac11f497/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
new file mode 100644
index 0000000..3f103b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests {@link MutableCSConfigurationProvider}.
+ */
+public class TestMutableCSConfigurationProvider {
+
+  private MutableCSConfigurationProvider confProvider;
+  private RMContext rmContext;
+  private Map<String, String> goodUpdate;
+  private Map<String, String> badUpdate;
+  private CapacityScheduler cs;
+
+  private static final String TEST_USER = "testUser";
+
+  @Before
+  public void setUp() {
+    cs = mock(CapacityScheduler.class);
+    rmContext = mock(RMContext.class);
+    when(rmContext.getScheduler()).thenReturn(cs);
+    confProvider = new MutableCSConfigurationProvider(rmContext);
+    goodUpdate = new HashMap<>();
+    goodUpdate.put("goodKey", "goodVal");
+    badUpdate = new HashMap<>();
+    badUpdate.put("badKey", "badVal");
+  }
+
+  @Test
+  public void testInMemoryBackedProvider() throws IOException {
+    Configuration conf = new Configuration();
+    confProvider.init(conf);
+    assertNull(confProvider.loadConfiguration(conf)
+        .get("goodKey"));
+
+    doNothing().when(cs).reinitialize(any(Configuration.class),
+        any(RMContext.class));
+    confProvider.mutateConfiguration(TEST_USER, goodUpdate);
+    assertEquals("goodVal", confProvider.loadConfiguration(conf)
+        .get("goodKey"));
+
+    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+    doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
+        any(RMContext.class));
+    confProvider.mutateConfiguration(TEST_USER, badUpdate);
+    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+  }
+}




[42/50] [abbrv] hadoop git commit: HADOOP-14672. Shaded Hadoop-client-minicluster includes unshaded classes, like: javax, sax, dom, etc. Contributed by Bharat Viswanadham.

Posted by xg...@apache.org.
HADOOP-14672. Shaded Hadoop-client-minicluster includes unshaded classes, like: javax, sax, dom, etc. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481385ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481385ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481385ea

Branch: refs/heads/YARN-5734
Commit: 481385ea8a6ac13b23520276e7718710b7b02c89
Parents: f14be0d
Author: Junping Du <ju...@apache.org>
Authored: Sun Jul 30 22:14:54 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Sun Jul 30 22:15:48 2017 -0700

----------------------------------------------------------------------
 .../hadoop-client-minicluster/pom.xml           | 32 ++++++++++++++++++++
 1 file changed, 32 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481385ea/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 93811ad..f4b2329 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -127,6 +127,10 @@
           <artifactId>jaxb-api</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>xml-apis</groupId>
+          <artifactId>xml-apis</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>org.apache.avro</groupId>
           <artifactId>avro</artifactId>
         </exclusion>
@@ -624,6 +628,12 @@
                         <exclude>**/*.class</exclude>
                       </excludes>
                     </filter>
+                    <filter>
+                      <artifact>xerces:xercesImpl</artifact>
+                      <excludes>
+                        <exclude>**/*</exclude>
+                      </excludes>
+                    </filter>
                   </filters>
                   <relocations>
                     <relocation>
@@ -646,6 +656,7 @@
                         <exclude>org/junit/*</exclude>
                         <exclude>org/junit/**/*</exclude>
                         <!-- Not the org/ packages that are a part of the jdk -->
+
                         <exclude>org/ietf/jgss/*</exclude>
                         <exclude>org/omg/**/*</exclude>
                         <exclude>org/w3c/dom/*</exclude>
@@ -655,6 +666,13 @@
                       </excludes>
                     </relocation>
                     <relocation>
+                      <pattern>contribs/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.contribs.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
                       <pattern>com/</pattern>
                       <shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
                       <excludes>
@@ -692,6 +710,13 @@
                       </excludes>
                     </relocation>
                     <relocation>
+                      <pattern>javassist/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javassist.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
                       <pattern>javax/el/</pattern>
                       <shadedPattern>${shaded.dependency.prefix}.javax.el.</shadedPattern>
                       <excludes>
@@ -713,6 +738,13 @@
                       </excludes>
                     </relocation>
                     <relocation>
+                      <pattern>jersey/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.jersey.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
                       <pattern>net/</pattern>
                       <shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>
                       <excludes>


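One way to sanity-check a shaded artifact for leaked, unrelocated packages
like the ones handled above is to scan its entries; a minimal sketch (the
jar path comes in as an argument):

import java.util.jar.JarEntry;
import java.util.jar.JarFile;

// Scans a shaded jar for class entries that escaped relocation.
public class ShadeCheck {
  public static void main(String[] args) throws Exception {
    try (JarFile jar = new JarFile(args[0])) { // path to the minicluster jar
      jar.stream()
          .map(JarEntry::getName)
          .filter(n -> n.endsWith(".class"))
          .filter(n -> n.startsWith("javax/") || n.startsWith("org/w3c/dom/")
              || n.startsWith("org/xml/sax/") || n.startsWith("contribs/"))
          .forEach(n -> System.out.println("unshaded: " + n));
    }
  }
}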


[23/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java
index 1d176d4..210cf04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java
@@ -25,17 +25,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.webapp.MimeType;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.WebAppException;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 
 /**
  * The parent class of all HTML pages.  Override 
- * {@link #render(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)}
+ * {@link #render(org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.HTML)}
  * to actually render the page.
  */
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public abstract class HtmlPage extends TextView {
 
-  public static class _ implements Hamlet._ {
+  public static class __ implements Hamlet.__ {
   }
 
   public class Page extends Hamlet {
@@ -50,8 +50,8 @@ public abstract class HtmlPage extends TextView {
       setWasInline(context().wasInline());
     }
 
-    public HTML<HtmlPage._> html() {
-      return new HTML<HtmlPage._>("html", null, EnumSet.of(EOpt.ENDTAG));
+    public HTML<HtmlPage.__> html() {
+      return new HTML<HtmlPage.__>("html", null, EnumSet.of(EOpt.ENDTAG));
     }
   }
 
@@ -91,6 +91,6 @@ public abstract class HtmlPage extends TextView {
    * Render the the HTML page.
    * @param html the page to render data to.
    */
-  protected abstract void render(Page.HTML<_> html);
+  protected abstract void render(Page.HTML<__> html);
 }
 

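The net effect for page authors: where the old Hamlet used _ (no longer a
legal identifier from JDK 9 on), hamlet2 uses __. A minimal page against the
new API, mirroring the pattern in this diff:

import org.apache.hadoop.yarn.webapp.view.HtmlPage;

// Minimal page using the hamlet2 __() calls to add content and close tags.
public class HelloPage extends HtmlPage {
  @Override
  protected void render(Page.HTML<__> html) {
    html.
      title("hello").
      p("#greeting").__("Hello, YARN!").__().__();
  }
}
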
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
index 9fe67f1..0ad8b3c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
@@ -26,11 +26,11 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TD;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR;
 
 import com.google.inject.Inject;
 
@@ -47,7 +47,7 @@ public class InfoBlock extends HtmlBlock {
       div(_INFO_WRAP).
         table(_INFO).
           tr().
-            th().$class(C_TH).$colspan(2)._(info.about())._()._();
+            th().$class(C_TH).$colspan(2).__(info.about()).__().__();
     int i = 0;
     for (ResponseInfo.Item item : info) {
       TR<TABLE<DIV<Hamlet>>> tr = table.
@@ -62,23 +62,23 @@ public class InfoBlock extends HtmlBlock {
         	DIV<TD<TR<TABLE<DIV<Hamlet>>>>> singleLineDiv;
             for ( String line :lines) {
               singleLineDiv = td.div();
-              singleLineDiv._(line);
-              singleLineDiv._();
+              singleLineDiv.__(line);
+              singleLineDiv.__();
             }
           } else {
-            td._(value);
+            td.__(value);
           }
-          td._();
+          td.__();
         } else {
-          tr.td()._r(value)._();
+          tr.td()._r(value).__();
         }
       } else {
         tr.
           td().
-            a(url(item.url), value)._();
+            a(url(item.url), value).__();
       }
-      tr._();
+      tr.__();
     }
-    table._()._();
+    table.__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index 06372e3..46c76d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -26,7 +26,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.split;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.HTML;
+import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.HTML;
 
 import com.google.common.collect.Lists;
 
@@ -82,19 +82,19 @@ public class JQueryUI extends HtmlBlock {
     initProgressBars(list);
 
     if (!list.isEmpty()) {
-      html.script().$type("text/javascript")._("$(function() {")
-          ._(list.toArray())._("});")._();
+      html.script().$type("text/javascript").__("$(function() {")
+          .__(list.toArray()).__("});").__();
     }
   }
 
   public static void jsnotice(HTML html) {
     html.
       div("#jsnotice.ui-state-error").
-          _("This page will not function without javascript enabled."
-            + " Please enable javascript on your browser.")._();
+        __("This page will not function without javascript enabled."
+            + " Please enable javascript on your browser.").__();
     html.
       script().$type("text/javascript").
-        _("$('#jsnotice').hide();")._();
+        __("$('#jsnotice').hide();").__();
   }
 
   protected void initAccordions(List<String> list) {
@@ -130,7 +130,7 @@ public class JQueryUI extends HtmlBlock {
         // for inserting stateSaveInit
         int pos = init.indexOf('{') + 1;  
         init = new StringBuffer(init).insert(pos, stateSaveInit).toString(); 
-        list.add(join(id,"DataTable =  $('#", id, "').dataTable(", init,
+        list.add(join(id, "DataTable =  $('#", id, "').dataTable(", init,
                       ").fnSetFilteringDelay(188);"));
         String postInit = $(postInitID(DATATABLES, id));
         if(!postInit.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java
index 4781a20..a4b6f63 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java
@@ -27,7 +27,7 @@ public class LipsumBlock extends HtmlBlock {
   public void render(Block html) {
     html.
       p().
-        _("Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
+        __("Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
           "Vivamus eu dui in ipsum tincidunt egestas ac sed nibh.",
           "Praesent quis nisl lorem, nec interdum urna.",
           "Duis sagittis dignissim purus sed sollicitudin.",
@@ -45,6 +45,6 @@ public class LipsumBlock extends HtmlBlock {
           "Proin eu ante nisl, vel porttitor eros.",
           "Aliquam gravida luctus augue, at scelerisque enim consectetur vel.",
           "Donec interdum tempor nisl, quis laoreet enim venenatis eu.",
-          "Quisque elit elit, vulputate eget porta vel, laoreet ac lacus.")._();
+          "Quisque elit elit, vulputate eget porta vel, laoreet ac lacus.").__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
index cdc13eb..a684eee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java
@@ -30,12 +30,12 @@ public class NavBlock extends HtmlBlock {
         ul().
           li("Item 1").
           li("Item 2").
-          li("...")._().
+          li("...").__().
         h3("Tools").
         ul().
-          li().a("/conf", "Configuration")._().
-          li().a("/stacks", "Thread dump")._().
-          li().a("/logs", "Logs")._().
-          li().a("/jmx?qry=Hadoop:*", "Metrics")._()._()._();
+          li().a("/conf", "Configuration").__().
+          li().a("/stacks", "Thread dump").__().
+          li().a("/logs", "Logs").__().
+          li().a("/jmx?qry=Hadoop:*", "Metrics").__().__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java
index 23aa51b..3e83199 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java
@@ -31,11 +31,11 @@ import org.apache.hadoop.yarn.webapp.SubView;
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public class TwoColumnCssLayout extends HtmlPage {
 
-  @Override protected void render(Page.HTML<_> html) {
+  @Override protected void render(Page.HTML<__> html) {
     preHead(html);
     html.
       title($("title")).
-      link(root_url("static","yarn.css")).
+      link(root_url("static", "yarn.css")).
       style(".main { min-height: 100%; height: auto !important; height: 100%;",
             "  margin: 0 auto -4em; border: 0; }",
             ".footer, .push { height: 4em; clear: both; border: 0 }",
@@ -50,28 +50,28 @@ public class TwoColumnCssLayout extends HtmlPage {
             "  right: 100%; overflow: hidden; }",
             ".leftnav .nav { float: left; width: 11em; position: relative;",
             "  right: 12em; overflow: hidden; }").
-      _(JQueryUI.class);
+        __(JQueryUI.class);
     postHead(html);
     JQueryUI.jsnotice(html);
     html.
       div(".main.ui-widget-content").
-        _(header()).
+        __(header()).
         div(".cmask.leftnav").
           div(".c1right").
             div(".c1wrap").
               div(".content").
-                _(content())._()._().
+        __(content()).__().__().
             div(".nav").
-              _(nav()).
-              div(".push")._()._()._()._()._().
+        __(nav()).
+              div(".push").__().__().__().__().__().
       div(".footer.ui-widget-content").
-        _(footer())._()._();
+        __(footer()).__().__();
   }
 
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
   }
 
-  protected void postHead(Page.HTML<_> html) {
+  protected void postHead(Page.HTML<__> html) {
   }
 
   protected Class<? extends SubView> header() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java
index 4d7752d..fe71395 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java
@@ -39,18 +39,18 @@ public class TwoColumnLayout extends HtmlPage {
    * (non-Javadoc)
    * @see org.apache.hadoop.yarn.webapp.view.HtmlPage#render(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void render(Page.HTML<_> html) {
+  @Override protected void render(Page.HTML<__> html) {
     preHead(html);
     html.
       title($(TITLE)).
-      link(root_url("static","yarn.css")).
+      link(root_url("static", "yarn.css")).
       style("#layout { height: 100%; }",
             "#layout thead td { height: 3em; }",
             "#layout #navcell { width: 11em; padding: 0 1em; }",
             "#layout td.content { padding-top: 0 }",
             "#layout tbody { vertical-align: top; }",
             "#layout tfoot td { height: 4em; }").
-      _(JQueryUI.class);
+        __(JQueryUI.class);
     postHead(html);
     JQueryUI.jsnotice(html);
     html.
@@ -58,17 +58,17 @@ public class TwoColumnLayout extends HtmlPage {
         thead().
           tr().
             td().$colspan(2).
-              _(header())._()._()._().
+        __(header()).__().__().__().
         tfoot().
           tr().
             td().$colspan(2).
-              _(footer())._()._()._().
+        __(footer()).__().__().__().
         tbody().
           tr().
             td().$id("navcell").
-              _(nav())._().
+        __(nav()).__().
             td().$class("content").
-              _(content())._()._()._()._()._();
+        __(content()).__().__().__().__().__();
   }
 
   /**
@@ -76,14 +76,14 @@ public class TwoColumnLayout extends HtmlPage {
    * involves setting page variables for Javascript and CSS rendering.
    * @param html the html to use to render. 
    */
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
   }
 
   /**
    * Do what needs to be done after the header is rendered.
    * @param html the html to use to render. 
    */
-  protected void postHead(Page.HTML<_> html) {
+  protected void postHead(Page.HTML<__> html) {
   }
 
   /**
@@ -120,7 +120,7 @@ public class TwoColumnLayout extends HtmlPage {
    * @param tableId the ID of the table to set styles on.
    * @param innerStyles any other styles to add to the table.
    */
-  protected void setTableStyles(Page.HTML<_> html, String tableId,
+  protected void setTableStyles(Page.HTML<__> html, String tableId,
                                 String... innerStyles) {
     List<String> styles = Lists.newArrayList();
     styles.add(join('#', tableId, "_paginate span {font-weight:normal}"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java
index 66d9ef2..075bed2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java
@@ -32,14 +32,14 @@ public class TestSubViews {
 
   static public class MainView extends HtmlPage {
     @Override
-    public void render(Page.HTML<_> html) {
+    public void render(Page.HTML<__> html) {
       html.
         body().
           div().
-            _(Sub1.class)._().
+          __(Sub1.class).__().
           div().
             i("inline text").
-            _(Sub2.class)._()._()._();
+          __(Sub2.class).__().__().__();
     }
   }
 
@@ -48,7 +48,7 @@ public class TestSubViews {
     public void render(Block html) {
       html.
         div("#sub1").
-          _("sub1 text")._();
+          __("sub1 text").__();
     }
   }
 
@@ -57,7 +57,7 @@ public class TestSubViews {
     public void render(Block html) {
       html.
         pre().
-          _("sub2 text")._();
+          __("sub2 text").__();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
index db50dd3..dea146d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
@@ -100,7 +100,7 @@ public class TestWebApp {
 
   static class TablesView extends HtmlPage {
     @Override
-    public void render(Page.HTML<_> html) {
+    public void render(Page.HTML<__> html) {
       set(DATATABLES_ID, "t1 t2 t3 t4");
       set(initID(DATATABLES, "t1"), tableInit().append("}").toString());
       set(initID(DATATABLES, "t2"), join("{bJQueryUI:true, sDom:'t',",
@@ -110,7 +110,7 @@ public class TestWebApp {
       html.
         title("Test DataTables").
         link("/static/yarn.css").
-        _(JQueryUI.class).
+          __(JQueryUI.class).
         style(".wrapper { padding: 1em }",
               ".wrapper h2 { margin: 0.5em 0 }",
               ".dataTables_wrapper { min-height: 1em }").
@@ -118,33 +118,33 @@ public class TestWebApp {
           h2("Default table init").
           table("#t1").
             thead().
-              tr().th("Column1").th("Column2")._()._().
+              tr().th("Column1").th("Column2").__().__().
             tbody().
-              tr().td("c1r1").td("c2r1")._().
-              tr().td("c1r2").td("c2r2")._()._()._().
+              tr().td("c1r1").td("c2r1").__().
+              tr().td("c1r2").td("c2r2").__().__().__().
           h2("Nested tables").
           div(_INFO_WRAP).
             table("#t2").
               thead().
-                tr().th(_TH, "Column1").th(_TH, "Column2")._()._().
+                tr().th(_TH, "Column1").th(_TH, "Column2").__().__().
               tbody().
                 tr().td("r1"). // th wouldn't work as of dt 1.7.5
                   td().$class(C_TABLE).
                     table("#t3").
                       thead().
-                        tr().th("SubColumn1").th("SubColumn2")._()._().
+                        tr().th("SubColumn1").th("SubColumn2").__().__().
                       tbody().
-                        tr().td("subc1r1").td("subc2r1")._().
-                        tr().td("subc1r2").td("subc2r2")._()._()._()._()._().
+                        tr().td("subc1r1").td("subc2r1").__().
+                        tr().td("subc1r2").td("subc2r2").__().__().__().__().__().
                 tr().td("r2"). // ditto
                   td().$class(C_TABLE).
                     table("#t4").
                       thead().
-                        tr().th("SubColumn1").th("SubColumn2")._()._().
+                        tr().th("SubColumn1").th("SubColumn2").__().__().
                       tbody().
-                        tr().td("subc1r1").td("subc2r1")._().
-                        tr().td("subc1r2").td("subc2r2")._().
-                        _()._()._()._()._()._()._()._()._();
+                        tr().td("subc1r1").td("subc2r1").__().
+                        tr().td("subc1r2").td("subc2r2").__().
+          __().__().__().__().__().__().__().__().__();
     }
   }
 
@@ -358,7 +358,7 @@ public class TestWebApp {
       assertEquals("foo", getContent(baseUrl +"test/foo").trim());
       app1 = WebApps.$for("test", this).at(port).start();
       assertEquals(port, app1.getListenerAddress().getPort());
-      app2 = WebApps.$for("test", this).at("0.0.0.0",port, true).start();
+      app2 = WebApps.$for("test", this).at("0.0.0.0", port, true).start();
       assertTrue(app2.getListenerAddress().getPort() > port);
       Configuration conf = new Configuration();
       port =  ServerSocketUtil.waitForPort(47000, 60);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java
index 89042c6..e510dd5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java
@@ -24,8 +24,6 @@ import java.io.PrintWriter;
 
 import org.apache.hadoop.yarn.webapp.WebAppException;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
-import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-import org.apache.hadoop.yarn.webapp.view.HtmlPage;
 
 import org.junit.Test;
 import static org.mockito.Mockito.*;
@@ -35,7 +33,7 @@ public class TestHtmlBlock {
     @Override
     public void render(Block html) {
       html.
-        p("#testid")._("test note")._();
+        p("#testid").__("test note").__();
     }
   }
 
@@ -43,16 +41,16 @@ public class TestHtmlBlock {
     @Override
     public void render(Block html) {
       html.
-        p()._("should throw");
+        p().__("should throw");
     }
   }
 
   public static class ShortPage extends HtmlPage {
     @Override
-    public void render(Page.HTML<_> html) {
+    public void render(Page.HTML<__> html) {
       html.
         title("short test").
-        _(ShortBlock.class);
+          __(ShortBlock.class);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java
index a5a8e1f..beed31f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java
@@ -25,7 +25,6 @@ import java.io.PrintWriter;
 import org.apache.hadoop.yarn.webapp.MimeType;
 import org.apache.hadoop.yarn.webapp.WebAppException;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
-import org.apache.hadoop.yarn.webapp.view.HtmlPage;
 import org.junit.Test;
 
 import static org.mockito.Mockito.*;
@@ -34,19 +33,19 @@ public class TestHtmlPage {
   
   public static class TestView extends HtmlPage {
     @Override
-    public void render(Page.HTML<_> html) {
+    public void render(Page.HTML<__> html) {
       html.
         title("test").
-        p("#testid")._("test note")._()._();
+        p("#testid").__("test note").__().__();
     }
   }
 
   public static class ShortView extends HtmlPage {
     @Override
-    public void render(Page.HTML<_> html) {
+    public void render(Page.HTML<__> html) {
       html.
         title("short test").
-        p()._("should throw");
+        p().__("should throw");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
index da5efbb..751aa2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java
@@ -45,7 +45,7 @@ public class TestInfoBlock {
 
     static {
       resInfo = new ResponseInfo();
-      resInfo._("User_Name", JAVASCRIPT);
+      resInfo.__("User_Name", JAVASCRIPT);
     }
 
     @Override
@@ -68,8 +68,8 @@ public class TestInfoBlock {
 
     static {
       resInfo = new ResponseInfo();
-      resInfo._("Multiple_line_value", "This is one line.");
-      resInfo._("Multiple_line_value", "This is first line.\nThis is second line.");	
+      resInfo.__("Multiple_line_value", "This is one line.");
+      resInfo.__("Multiple_line_value", "This is first line.\nThis is second line.");
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java
index a718636..20df409 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java
@@ -22,8 +22,6 @@ import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.webapp.Controller;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
-import org.apache.hadoop.yarn.webapp.view.HtmlPage;
-import org.apache.hadoop.yarn.webapp.view.TwoColumnCssLayout;
 import org.junit.Test;
 
 public class TestTwoColumnCssPage {
@@ -52,10 +50,10 @@ public class TestTwoColumnCssPage {
 
   public static class TestView extends HtmlPage {
     @Override
-    public void render(Page.HTML<_> html) {
+    public void render(Page.HTML<__> html) {
       html.
         title($("title")).
-        h1($("title"))._();
+        h1($("title")).__();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
index 3798ee5..1601f8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java
@@ -34,7 +34,7 @@ public class AHSErrorsAndWarningsPage extends AHSView {
   }
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     String title = "Errors and Warnings in the Application History Server";
     setTitle(title);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
index 8821bc0..d845503 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java
@@ -32,7 +32,7 @@ public class AHSLogsPage extends AHSView {
    * preHead(org.apache.hadoop .yarn.webapp.hamlet.Hamlet.HTML)
    */
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     String logEntity = $(ENTITY_STRING);
     if (logEntity == null || logEntity.isEmpty()) {
       logEntity = $(CONTAINER_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
index 65b5ac1..d965eeb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
@@ -37,7 +37,7 @@ public class AHSView extends TwoColumnLayout {
   static final int MAX_FAST_ROWS = 1000; // inline js array
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "apps");
     set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit(false));
@@ -49,7 +49,7 @@ public class AHSView extends TwoColumnLayout {
     setTitle(sjoin(reqState, "Applications"));
   }
 
-  protected void commonPreHead(Page.HTML<_> html) {
+  protected void commonPreHead(Page.HTML<__> html) {
     set(ACCORDION_ID, "nav");
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }

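Most files in this commit change nothing but that signature: the body of preHead stays exactly as it was. A sketch of the recurring pattern (hypothetical subclass, assuming the applicationhistoryservice webapp classes are on the classpath):

    import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSView;

    public class MyStatusPage extends AHSView {
      @Override
      protected void preHead(Page.HTML<__> html) {
        commonPreHead(html);          // shared accordion/nav setup from AHSView
        setTitle("My status view");   // hypothetical title
      }
    }
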
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
index b2419e9..996568b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 import com.google.inject.Inject;
-import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
-import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.View;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
@@ -38,10 +36,10 @@ public class AboutBlock extends HtmlBlock {
     TimelineAbout tsInfo = TimelineUtils.createTimelineAbout(
         "Timeline Server - Generic History Service UI");
     info("Timeline Server Overview").
-        _("Timeline Server Version:", tsInfo.getTimelineServiceBuildVersion() +
+        __("Timeline Server Version:", tsInfo.getTimelineServiceBuildVersion() +
             " on " + tsInfo.getTimelineServiceVersionBuiltOn()).
-        _("Hadoop Version:", tsInfo.getHadoopBuildVersion() +
+        __("Hadoop Version:", tsInfo.getHadoopBuildVersion() +
             " on " + tsInfo.getHadoopVersionBuiltOn());
-    html._(InfoBlock.class);
+    html.__(InfoBlock.class);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
index b50073a..1df5832 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
@@ -20,12 +20,9 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.YarnWebParams;
-
-import static org.apache.hadoop.yarn.util.StringHelper.join;
 
 public class AboutPage extends AHSView {
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     set(TITLE, "Timeline Server - Generic History Service");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
index a08297d..ec00db6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 public class AppAttemptPage extends AHSView {
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
index c0e1394..32fcc95 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 public class AppPage extends AHSView {
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     String appId = $(YarnWebParams.APPLICATION_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
index 1be8a26..8327ee6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 public class ContainerPage extends AHSView {
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     String containerId = $(YarnWebParams.CONTAINER_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index 25ee4f0..3ee4dd1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -23,7 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 public class NavBlock extends HtmlBlock {
@@ -44,34 +44,34 @@ public class NavBlock extends HtmlBlock {
             h3("Application History").
                 ul().
                     li().a(url("about"), "About").
-                    _().
+        __().
                     li().a(url("apps"), "Applications").
                         ul().
                             li().a(url("apps",
                                 YarnApplicationState.FINISHED.toString()),
                                 YarnApplicationState.FINISHED.toString()).
-                            _().
+        __().
                             li().a(url("apps",
                                 YarnApplicationState.FAILED.toString()),
                                 YarnApplicationState.FAILED.toString()).
-                            _().
+        __().
                             li().a(url("apps",
                                 YarnApplicationState.KILLED.toString()),
                                 YarnApplicationState.KILLED.toString()).
-                            _().
-                        _().
-                    _().
-                _();
+        __().
+        __().
+        __().
+        __();
 
     Hamlet.UL<Hamlet.DIV<Hamlet>> tools = nav.h3("Tools").ul();
-    tools.li().a("/conf", "Configuration")._()
-        .li().a("/logs", "Local logs")._()
-        .li().a("/stacks", "Server stacks")._()
-        .li().a("/jmx?qry=Hadoop:*", "Server metrics")._();
+    tools.li().a("/conf", "Configuration").__()
+        .li().a("/logs", "Local logs").__()
+        .li().a("/stacks", "Server stacks").__()
+        .li().a("/jmx?qry=Hadoop:*", "Server metrics").__();
 
     if (addErrorsAndWarningsLink) {
-      tools.li().a(url("errors-and-warnings"), "Errors/Warnings")._();
+      tools.li().a(url("errors-and-warnings"), "Errors/Warnings").__();
     }
-    tools._()._();
+    tools.__().__();
   }
 }

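The NavBlock hunk shows the other half of the rename: __() is the generic "close the current element" call, so deeply nested lists pop one level per call. A condensed sketch of the idiom (hypothetical block, assuming the hamlet2 classes used above):

    import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
    import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

    public class ToolsNavBlock extends HtmlBlock {
      @Override
      protected void render(Block html) {
        Hamlet.UL<Hamlet.DIV<Hamlet>> tools =
            html.div("#nav").h3("Tools").ul(); // open <div>, add <h3>, open <ul>
        tools
            .li().a("/conf", "Configuration").__() // __() closes this <li>
            .li().a("/logs", "Local logs").__();   // ...and this one
        tools.__()   // close <ul>
            .__();   // close <div>
      }
    }
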
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index 798c372..87c554d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -36,10 +36,9 @@ import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 import com.google.inject.Inject;
@@ -94,7 +93,7 @@ public class AppAttemptBlock extends HtmlBlock {
       String message =
           "Failed to read the application attempt " + appAttemptId + ".";
       LOG.error(message, e);
-      html.p()._(message)._();
+      html.p().__(message).__();
       return;
     }
 
@@ -140,14 +139,14 @@ public class AppAttemptBlock extends HtmlBlock {
     if (exceptionWhenGetContainerReports) {
       html
         .p()
-        ._(
+        .__(
           "Sorry, Failed to get containers for application attempt" + attemptid
-              + ".")._();
+              + ".").__();
       return;
     }
 
     createAttemptHeadRoomTable(html);
-    html._(InfoBlock.class);
+    html.__(InfoBlock.class);
 
     createTablesForAttemptMetrics(html);
 
@@ -155,7 +154,7 @@ public class AppAttemptBlock extends HtmlBlock {
     TBODY<TABLE<Hamlet>> tbody =
         html.table("#containers").thead().tr().th(".id", "Container ID")
           .th(".node", "Node").th(".exitstatus", "Container Exit Status")
-          .th(".logs", "Logs")._()._().tbody();
+          .th(".logs", "Logs").__().__().tbody();
 
     StringBuilder containersTableData = new StringBuilder("[\n");
     for (ContainerReport containerReport : containers) {
@@ -186,9 +185,9 @@ public class AppAttemptBlock extends HtmlBlock {
     }
     containersTableData.append("]");
     html.script().$type("text/javascript")
-      ._("var containersTableData=" + containersTableData)._();
+      .__("var containersTableData=" + containersTableData).__();
 
-    tbody._()._();
+    tbody.__().__();
   }
 
   protected void generateOverview(ApplicationAttemptReport appAttemptReport,
@@ -196,18 +195,18 @@ public class AppAttemptBlock extends HtmlBlock {
       String node) {
     String amContainerId = appAttempt.getAmContainerId();
     info("Application Attempt Overview")
-      ._(
+      .__(
         "Application Attempt State:",
         appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
           .getAppAttemptState())
-      ._("AM Container:",
+      .__("AM Container:",
           amContainerId == null
               || containers == null
               || !hasAMContainer(appAttemptReport.getAMContainerId(),
                   containers) ? null : root_url("container", amContainerId),
           amContainerId == null ? "N/A" : amContainerId)
-      ._("Node:", node)
-      ._(
+      .__("Node:", node)
+      .__(
         "Tracking URL:",
         appAttempt.getTrackingUrl() == null
             || appAttempt.getTrackingUrl().equals(UNAVAILABLE) ? null
@@ -219,7 +218,7 @@ public class AppAttemptBlock extends HtmlBlock {
                 || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED
                 || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED
                 ? "History" : "ApplicationMaster")
-      ._(
+      .__(
         "Diagnostics Info:",
         appAttempt.getDiagnosticsInfo() == null ? "" : appAttempt
           .getDiagnosticsInfo());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 11bd9b4..d4090aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -55,9 +55,9 @@ import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
@@ -116,7 +116,7 @@ public class AppBlock extends HtmlBlock {
     } catch (Exception e) {
       String message = "Failed to read the application " + appID + ".";
       LOG.error(message, e);
-      html.p()._(message)._();
+      html.p().__(message).__();
       return;
     }
 
@@ -136,8 +136,8 @@ public class AppBlock extends HtmlBlock {
       // Application Kill
       html.div()
         .button()
-          .$onclick("confirmAction()").b("Kill Application")._()
-          ._();
+          .$onclick("confirmAction()").b("Kill Application").__()
+          .__();
 
       StringBuilder script = new StringBuilder();
       script.append("function confirmAction() {")
@@ -160,7 +160,7 @@ public class AppBlock extends HtmlBlock {
           .append(" }")
           .append("}");
 
-      html.script().$type("text/javascript")._(script.toString())._();
+      html.script().$type("text/javascript").__(script.toString()).__();
     }
 
     String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) +
@@ -185,13 +185,13 @@ public class AppBlock extends HtmlBlock {
       String message =
           "Failed to read the attempts of the application " + appID + ".";
       LOG.error(message, e);
-      html.p()._(message)._();
+      html.p().__(message).__();
       return;
     }
 
     createApplicationMetricsTable(html);
 
-    html._(InfoBlock.class);
+    html.__(InfoBlock.class);
 
     generateApplicationTable(html, callerUGI, attempts);
 
@@ -207,25 +207,25 @@ public class AppBlock extends HtmlBlock {
   private void generateOverviewTable(AppInfo app, String schedulerPath,
       String webUiType, ApplicationReport appReport) {
     ResponseInfo overviewTable = info("Application Overview")
-        ._("User:", schedulerPath, app.getUser())
-        ._("Name:", app.getName())
-        ._("Application Type:", app.getType())
-        ._("Application Tags:",
+        .__("User:", schedulerPath, app.getUser())
+        .__("Name:", app.getName())
+        .__("Application Type:", app.getType())
+        .__("Application Tags:",
             app.getApplicationTags() == null ? "" : app.getApplicationTags())
-        ._("Application Priority:", clarifyAppPriority(app.getPriority()))
-        ._(
+        .__("Application Priority:", clarifyAppPriority(app.getPriority()))
+        .__(
             "YarnApplicationState:",
             app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
                 .getAppState()))
-        ._("Queue:", schedulerPath, app.getQueue())
-        ._("FinalStatus Reported by AM:",
+        .__("Queue:", schedulerPath, app.getQueue())
+        .__("FinalStatus Reported by AM:",
             clairfyAppFinalStatus(app.getFinalAppStatus()))
-        ._("Started:", Times.format(app.getStartedTime()))
-        ._(
+        .__("Started:", Times.format(app.getStartedTime()))
+        .__(
             "Elapsed:",
             StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
                 app.getFinishedTime())))
-        ._(
+        .__(
             "Tracking URL:",
             app.getTrackingUrl() == null
                 || app.getTrackingUrl().equals(UNAVAILABLE) ? null : root_url(app
@@ -240,31 +240,31 @@ public class AppBlock extends HtmlBlock {
         && webUiType.equals(YarnWebParams.RM_WEB_UI)) {
       LogAggregationStatus status = getLogAggregationStatus();
       if (status == null) {
-        overviewTable._("Log Aggregation Status:", "N/A");
+        overviewTable.__("Log Aggregation Status:", "N/A");
       } else if (status == LogAggregationStatus.DISABLED
           || status == LogAggregationStatus.NOT_START
           || status == LogAggregationStatus.SUCCEEDED) {
-        overviewTable._("Log Aggregation Status:", status.name());
+        overviewTable.__("Log Aggregation Status:", status.name());
       } else {
-        overviewTable._("Log Aggregation Status:",
+        overviewTable.__("Log Aggregation Status:",
             root_url("logaggregationstatus", app.getAppId()), status.name());
       }
       long timeout = appReport.getApplicationTimeouts()
           .get(ApplicationTimeoutType.LIFETIME).getRemainingTime();
       if (timeout < 0) {
-        overviewTable._("Application Timeout (Remaining Time):", "Unlimited");
+        overviewTable.__("Application Timeout (Remaining Time):", "Unlimited");
       } else {
-        overviewTable._("Application Timeout (Remaining Time):",
+        overviewTable.__("Application Timeout (Remaining Time):",
             String.format("%d seconds", timeout));
       }
     }
-    overviewTable._("Diagnostics:",
+    overviewTable.__("Diagnostics:",
         app.getDiagnosticsInfo() == null ? "" : app.getDiagnosticsInfo());
-    overviewTable._("Unmanaged Application:", app.isUnmanagedApp());
-    overviewTable._("Application Node Label expression:",
+    overviewTable.__("Unmanaged Application:", app.isUnmanagedApp());
+    overviewTable.__("Application Node Label expression:",
         app.getAppNodeLabelExpression() == null ? "<Not set>"
             : app.getAppNodeLabelExpression());
-    overviewTable._("AM container Node Label expression:",
+    overviewTable.__("AM container Node Label expression:",
         app.getAmNodeLabelExpression() == null ? "<Not set>"
             : app.getAmNodeLabelExpression());
   }
@@ -276,7 +276,7 @@ public class AppBlock extends HtmlBlock {
     TBODY<TABLE<Hamlet>> tbody =
         html.table("#attempts").thead().tr().th(".id", "Attempt ID")
           .th(".started", "Started").th(".node", "Node").th(".logs", "Logs")
-          ._()._().tbody();
+          .__().__().tbody();
 
     StringBuilder attemptsTableData = new StringBuilder("[\n");
     for (final ApplicationAttemptReport appAttemptReport : attempts) {
@@ -312,7 +312,7 @@ public class AppBlock extends HtmlBlock {
             "Failed to read the AM container of the application attempt "
                 + appAttemptReport.getApplicationAttemptId() + ".";
         LOG.error(message, e);
-        html.p()._(message)._();
+        html.p().__(message).__();
         return;
       }
       long startTime = 0L;
@@ -346,9 +346,9 @@ public class AppBlock extends HtmlBlock {
     }
     attemptsTableData.append("]");
     html.script().$type("text/javascript")
-      ._("var attemptsTableData=" + attemptsTableData)._();
+      .__("var attemptsTableData=" + attemptsTableData).__();
 
-    tbody._()._();
+    tbody.__().__();
   }
 
   private String clarifyAppState(YarnApplicationState state) {

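AppBlock embeds its JavaScript through the same API: script() opens a <script> element, $type sets its attribute, __(text) supplies the script body, and the final __() closes the tag. A minimal sketch (hypothetical block with a simplified script, assumptions as above):

    import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

    public class KillButtonBlock extends HtmlBlock {
      @Override
      protected void render(Block html) {
        html.div()
            .button().$onclick("confirmAction()").b("Kill Application").__()
            .__();
        String script = "function confirmAction() {"
            + " return confirm('Kill the application?'); }"; // simplified body
        html.script().$type("text/javascript").__(script).__();
      }
    }
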
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
index 7f42343..d836e64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
@@ -44,9 +44,9 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -134,7 +134,7 @@ public class AppsBlock extends HtmlBlock {
     catch( Exception e) {
       String message = "Failed to read the applications.";
       LOG.error(message, e);
-      html.p()._(message)._();
+      html.p().__(message).__();
       return;
     }
     renderData(html);
@@ -147,7 +147,7 @@ public class AppsBlock extends HtmlBlock {
           .th(".queue", "Queue").th(".priority", "Application Priority")
           .th(".starttime", "StartTime").th(".finishtime", "FinishTime")
           .th(".state", "State").th(".finalstatus", "FinalStatus")
-          .th(".progress", "Progress").th(".ui", "Tracking UI")._()._().tbody();
+          .th(".progress", "Progress").th(".ui", "Tracking UI").__().__().tbody();
 
     StringBuilder appsTableData = new StringBuilder("[\n");
     for (ApplicationReport appReport : appReports) {
@@ -218,8 +218,8 @@ public class AppsBlock extends HtmlBlock {
     }
     appsTableData.append("]");
     html.script().$type("text/javascript")
-      ._("var appsTableData=" + appsTableData)._();
+      .__("var appsTableData=" + appsTableData).__();
 
-    tbody._()._();
+    tbody.__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
index 893e823..fa35a3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
@@ -86,7 +85,7 @@ public class ContainerBlock extends HtmlBlock {
     } catch (Exception e) {
       String message = "Failed to read the container " + containerid + ".";
       LOG.error(message, e);
-      html.p()._(message)._();
+      html.p().__(message).__();
       return;
     }
 
@@ -99,32 +98,32 @@ public class ContainerBlock extends HtmlBlock {
     setTitle(join("Container ", containerid));
 
     info("Container Overview")
-      ._(
+      .__(
         "Container State:",
         container.getContainerState() == null ? UNAVAILABLE : container
           .getContainerState())
-      ._("Exit Status:", container.getContainerExitStatus())
-      ._(
+      .__("Exit Status:", container.getContainerExitStatus())
+      .__(
         "Node:",
         container.getNodeHttpAddress() == null ? "#" : container
           .getNodeHttpAddress(),
         container.getNodeHttpAddress() == null ? "N/A" : container
           .getNodeHttpAddress())
-      ._("Priority:", container.getPriority())
-      ._("Started:", Times.format(container.getStartedTime()))
-      ._(
+      .__("Priority:", container.getPriority())
+      .__("Started:", Times.format(container.getStartedTime()))
+      .__(
         "Elapsed:",
         StringUtils.formatTime(Times.elapsed(container.getStartedTime(),
           container.getFinishedTime())))
-      ._(
+      .__(
         "Resource:",
         container.getAllocatedMB() + " Memory, "
             + container.getAllocatedVCores() + " VCores")
-      ._("Logs:", container.getLogUrl() == null ? "#" : container.getLogUrl(),
+      .__("Logs:", container.getLogUrl() == null ? "#" : container.getLogUrl(),
           container.getLogUrl() == null ? "N/A" : "Logs")
-      ._("Diagnostics:", container.getDiagnosticsInfo() == null ?
+      .__("Diagnostics:", container.getDiagnosticsInfo() == null ?
           "" : container.getDiagnosticsInfo());
 
-    html._(InfoBlock.class);
+    html.__(InfoBlock.class);
   }
 }
\ No newline at end of file

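ContainerBlock illustrates the two-step overview pattern: __(key, value) rows accumulate on the ResponseInfo returned by info(...), and html.__(InfoBlock.class) then renders the accumulated rows as a table through the injected sub-view. Sketch (hypothetical block with sample values, assumptions as above):

    import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
    import org.apache.hadoop.yarn.webapp.view.InfoBlock;

    public class OverviewBlock extends HtmlBlock {
      @Override
      protected void render(Block html) {
        info("Container Overview")              // names the ResponseInfo table
            .__("Container State:", "COMPLETE") // sample value, not real data
            .__("Exit Status:", 0);
        html.__(InfoBlock.class);               // renders the queued rows
      }
    }
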
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
index 56db66c..75f6fcd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.security.AdminACLsManager;
 import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.util.Times;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import java.util.ArrayList;
@@ -74,19 +74,19 @@ public class ErrorsAndWarningsBlock extends HtmlBlock {
     }
 
     if (!isAdmin) {
-      html.div().p()._("This page is for admins only.")._()._();
+      html.div().p().__("This page is for admins only.").__().__();
       return;
     }
 
     if (log instanceof Log4JLogger) {
-      html._(ErrorMetrics.class);
-      html._(WarningMetrics.class);
+      html.__(ErrorMetrics.class);
+      html.__(WarningMetrics.class);
       html.div().button().$onclick("reloadPage()").b("View data for the last ")
-        ._().select().$id("cutoff").option().$value("60")._("1 min")._()
-        .option().$value("300")._("5 min")._().option().$value("900")
-        ._("15 min")._().option().$value("3600")._("1 hour")._().option()
-        .$value("21600")._("6 hours")._().option().$value("43200")
-        ._("12 hours")._().option().$value("86400")._("24 hours")._()._()._();
+        .__().select().$id("cutoff").option().$value("60").__("1 min").__()
+        .option().$value("300").__("5 min").__().option().$value("900")
+        .__("15 min").__().option().$value("3600").__("1 hour").__().option()
+        .$value("21600").__("6 hours").__().option().$value("43200")
+        .__("12 hours").__().option().$value("86400").__("24 hours").__().__().__();
 
       String script = "function reloadPage() {"
           + " var timePeriod = $(\"#cutoff\").val();"
@@ -97,7 +97,7 @@ public class ErrorsAndWarningsBlock extends HtmlBlock {
           + "  $(element).parent().siblings('.toggle-content').fadeToggle();"
           + "}";
 
-      html.script().$type("text/javascript")._(script)._();
+      html.script().$type("text/javascript").__(script).__();
 
       html.style(".toggle-content { display: none; }");
 
@@ -110,7 +110,7 @@ public class ErrorsAndWarningsBlock extends HtmlBlock {
       Hamlet.TBODY<Hamlet.TABLE<Hamlet>> errorsTable =
           html.table("#messages").thead().tr().th(".message", "Message")
             .th(".type", "Type").th(".count", "Count")
-            .th(".lasttime", "Latest Message Time")._()._().tbody();
+            .th(".lasttime", "Latest Message Time").__().__().tbody();
 
       // cutoff has to be in seconds
       cutoff.add((Time.now() - cutoffPeriodSeconds * 1000) / 1000);
@@ -145,18 +145,18 @@ public class ErrorsAndWarningsBlock extends HtmlBlock {
               }
 
               cell.pre().a().$href("#").$onclick("toggleContent(this);")
-                .$style("white-space: pre")._(displayMessage)._()._().div()
-                .$class("toggle-content").pre()._(message)._()._()._();
+                .$style("white-space: pre").__(displayMessage).__().__().div()
+                .$class("toggle-content").pre().__(message).__().__().__();
             } else {
-              cell.pre()._(message)._()._();
+              cell.pre().__(message).__().__();
             }
             Log4jWarningErrorMetricsAppender.Element ele = entry.getValue();
             row.td(type).td(String.valueOf(ele.count))
-              .td(Times.format(ele.timestampSeconds * 1000))._();
+              .td(Times.format(ele.timestampSeconds * 1000)).__();
           }
         }
       }
-      errorsTable._()._();
+      errorsTable.__().__();
     }
   }
 
@@ -199,19 +199,19 @@ public class ErrorsAndWarningsBlock extends HtmlBlock {
             html.div().$class("metrics").$style("padding-bottom: 20px");
         div.h3(tableHeading).table("#metricsoverview").thead()
           .$class("ui-widget-header").tr().th().$class("ui-state-default")
-          ._("Last 1 minute")._().th().$class("ui-state-default")
-          ._("Last 5 minutes")._().th().$class("ui-state-default")
-          ._("Last 15 minutes")._().th().$class("ui-state-default")
-          ._("Last 1 hour")._().th().$class("ui-state-default")
-          ._("Last 6 hours")._().th().$class("ui-state-default")
-          ._("Last 12 hours")._().th().$class("ui-state-default")
-          ._("Last 24 hours")._()._()._().tbody().$class("ui-widget-content")
+          .__("Last 1 minute").__().th().$class("ui-state-default")
+          .__("Last 5 minutes").__().th().$class("ui-state-default")
+          .__("Last 15 minutes").__().th().$class("ui-state-default")
+          .__("Last 1 hour").__().th().$class("ui-state-default")
+          .__("Last 6 hours").__().th().$class("ui-state-default")
+          .__("Last 12 hours").__().th().$class("ui-state-default")
+          .__("Last 24 hours").__().__().__().tbody().$class("ui-widget-content")
           .tr().td(String.valueOf(values.get(0)))
           .td(String.valueOf(values.get(1))).td(String.valueOf(values.get(2)))
           .td(String.valueOf(values.get(3))).td(String.valueOf(values.get(4)))
           .td(String.valueOf(values.get(5))).td(String.valueOf(values.get(6)))
-          ._()._()._();
-        div._();
+          .__().__().__();
+        div.__();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
index d32b271..6425da6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java
@@ -31,17 +31,17 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.BODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
 
 public class AllApplicationsPage extends NMView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     setTitle("Applications running on this node");
     set(DATATABLES_ID, "applications");
@@ -88,22 +88,22 @@ public class AllApplicationsPage extends NMView {
             .table("#applications")
               .thead()
                 .tr()
-                  .td()._("ApplicationId")._()
-                  .td()._("ApplicationState")._()
-                ._()
-               ._()
+                  .td().__("ApplicationId").__()
+                  .td().__("ApplicationState").__()
+                .__()
+               .__()
                .tbody();
       for (Entry<ApplicationId, Application> entry : this.nmContext
           .getApplications().entrySet()) {
         AppInfo info = new AppInfo(entry.getValue());
         tableBody
           .tr()
-            .td().a(url("application", info.getId()), info.getId())._()
-            .td()._(info.getState())
-            ._()
-          ._();
+            .td().a(url("application", info.getId()), info.getId()).__()
+            .td().__(info.getState())
+            .__()
+          .__();
       }
-      tableBody._()._()._();
+      tableBody.__().__().__();
     }
   }
 }

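The AllApplicationsPage hunk is the tabular variant of the same idiom: header cells are written with td().__(text).__(), the header is closed level by level, data rows are appended, and a final chain of __() calls closes the remaining elements. Sketch (hypothetical block with sample data, assumptions as above):

    import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
    import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
    import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
    import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

    public class AppsTableBlock extends HtmlBlock {
      @Override
      protected void render(Block html) {
        TBODY<TABLE<Hamlet>> tableBody = html
            .table("#applications")
              .thead()
                .tr()
                  .td().__("ApplicationId").__()    // header cell
                  .td().__("ApplicationState").__() // header cell
                .__()                               // close <tr>
              .__()                                 // close <thead>
              .tbody();
        tableBody
            .tr()
              .td().__("application_0000_0001").__() // sample id, not real data
              .td().__("RUNNING").__()
            .__();                                   // close the row
        tableBody.__().__();                         // close <tbody>, <table>
      }
    }
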
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
index 24b8575..3fc6f3c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java
@@ -31,17 +31,17 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.BODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
 
 public class AllContainersPage extends NMView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     setTitle("All containers running on this node");
     set(DATATABLES_ID, "containers");
@@ -82,24 +82,24 @@ public class AllContainersPage extends NMView {
         .table("#containers")
           .thead()
             .tr()
-              .td()._("ContainerId")._()
-              .td()._("ContainerState")._()
-              .td()._("logs")._()
-            ._()
-          ._().tbody();
+              .td().__("ContainerId").__()
+              .td().__("ContainerState").__()
+              .td().__("logs").__()
+            .__()
+          .__().tbody();
       for (Entry<ContainerId, Container> entry : this.nmContext
           .getContainers().entrySet()) {
         ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue());
         tableBody
           .tr()
             .td().a(url("container", info.getId()), info.getId())
-            ._()
-            .td()._(info.getState())._()
+            .__()
+            .td().__(info.getState()).__()
             .td()
-                .a(url(info.getShortLogLink()), "logs")._()
-          ._();
+                .a(url(info.getShortLogLink()), "logs").__()
+          .__();
       }
-      tableBody._()._()._();
+      tableBody.__().__().__();
     }
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
index 2783b18..00f80ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
@@ -30,12 +30,11 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -43,7 +42,7 @@ import com.google.inject.Inject;
 
 public class ApplicationPage extends NMView implements YarnWebParams {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     set(DATATABLES_ID, "containers");
@@ -80,30 +79,30 @@ public class ApplicationPage extends NMView implements YarnWebParams {
       try {
         applicationID = ApplicationId.fromString($(APPLICATION_ID));
       } catch (IllegalArgumentException e) {
-        html.p()._("Invalid Application Id " + $(APPLICATION_ID))._();
+        html.p().__("Invalid Application Id " + $(APPLICATION_ID)).__();
         return;
       }
       DIV<Hamlet> div = html.div("#content");
       Application app = this.nmContext.getApplications().get(applicationID);
       if (app == null) {
         div.h1("Unknown application with id " + applicationID
-            + ". Application might have been completed")._();
+            + ". Application might have been completed").__();
         return;
       }
       AppInfo info = new AppInfo(app);
       info("Application's information")
-            ._("ApplicationId", info.getId())
-            ._("ApplicationState", info.getState())
-            ._("User", info.getUser());
-      TABLE<Hamlet> containersListBody = html._(InfoBlock.class)
+            .__("ApplicationId", info.getId())
+            .__("ApplicationState", info.getState())
+            .__("User", info.getUser());
+      TABLE<Hamlet> containersListBody = html.__(InfoBlock.class)
           .table("#containers");
       for (String containerIdStr : info.getContainers()) {
         containersListBody
                .tr().td()
                  .a(url("container", containerIdStr), containerIdStr)
-                 ._()._();
+                 .__().__();
       }
-      containersListBody._();
+      containersListBody.__();
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
index 3e5f4d2..f619e2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
@@ -37,12 +37,11 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.PRE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.PRE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -51,7 +50,7 @@ public class ContainerLogsPage extends NMView {
   
   public static final String REDIRECT_URL = "redirect.url";
   
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     String redirectUrl = $(REDIRECT_URL);
     if (redirectUrl == null || redirectUrl.isEmpty()) {
       set(TITLE, join("Logs for ", $(CONTAINER_ID)));
@@ -142,10 +141,10 @@ public class ContainerLogsPage extends NMView {
         try {
           long toRead = end - start;
           if (toRead < logFile.length()) {
-            html.p()._("Showing " + toRead + " bytes. Click ")
+            html.p().__("Showing " + toRead + " bytes. Click ")
                 .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), 
                     logFile.getName(), "?start=0"), "here").
-                    _(" for full log")._();
+                __(" for full log").__();
           }
           
           IOUtils.skipFully(logByteStream, start);
@@ -160,12 +159,12 @@ public class ContainerLogsPage extends NMView {
 
           while ((len = reader.read(cbuf, 0, currentToRead)) > 0
               && toRead > 0) {
-            pre._(new String(cbuf, 0, len));
+            pre.__(new String(cbuf, 0, len));
             toRead = toRead - len;
             currentToRead = toRead > bufferSize ? bufferSize : (int) toRead;
           }
 
-          pre._();
+          pre.__();
           reader.close();
 
         } catch (IOException e) {
@@ -199,7 +198,7 @@ public class ContainerLogsPage extends NMView {
                 .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER),
                     logFile.getName(), "?start=-4096"),
                     logFile.getName() + " : Total file length is "
-                        + logFile.length() + " bytes.")._();
+                        + logFile.length() + " bytes.").__();
           }
         }
       }

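ContainerLogsPage streams file contents through the same calls: a <pre> element stays open while chunks are appended with __(String), then a single __() closes it. Standalone sketch (hypothetical helper with an illustrative buffer size, assuming the hamlet2 classes):

    import java.io.IOException;
    import java.io.Reader;
    import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
    import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.PRE;

    public final class LogStreamer {
      private LogStreamer() {
      }

      static void streamLog(Hamlet html, Reader reader, long toRead)
          throws IOException {
        PRE<Hamlet> pre = html.pre();
        char[] cbuf = new char[65536];              // illustrative buffer size
        int len;
        while (toRead > 0 && (len = reader.read(cbuf, 0,
            (int) Math.min(cbuf.length, toRead))) > 0) {
          pre.__(new String(cbuf, 0, len));         // append one chunk of text
          toRead -= len;
        }
        pre.__();                                   // close </pre>
      }
    }
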
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
index 4beccc9..8117dca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
@@ -25,11 +25,10 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -38,7 +37,7 @@ import com.google.inject.Inject;
 public class ContainerPage extends NMView implements YarnWebParams {
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     setTitle("Container " + $(CONTAINER_ID));
@@ -65,7 +64,7 @@ public class ContainerPage extends NMView implements YarnWebParams {
       try {
         containerID = ContainerId.fromString($(CONTAINER_ID));
       } catch (IllegalArgumentException e) {
-        html.p()._("Invalid containerId " + $(CONTAINER_ID))._();
+        html.p().__("Invalid containerId " + $(CONTAINER_ID)).__();
         return;
       }
 
@@ -73,22 +72,22 @@ public class ContainerPage extends NMView implements YarnWebParams {
       Container container = this.nmContext.getContainers().get(containerID);
       if (container == null) {
         div.h1("Unknown Container. Container might have completed, "
-                + "please go back to the previous page and retry.")._();
+                + "please go back to the previous page and retry.").__();
         return;
       }
       ContainerInfo info = new ContainerInfo(this.nmContext, container);
 
       info("Container information")
-        ._("ContainerID", info.getId())
-        ._("ContainerState", info.getState())
-        ._("ExitStatus", info.getExitStatus())
-        ._("Diagnostics", info.getDiagnostics())
-        ._("User", info.getUser())
-        ._("TotalMemoryNeeded", info.getMemoryNeeded())
-        ._("TotalVCoresNeeded", info.getVCoresNeeded())
-        ._("ExecutionType", info.getExecutionType())
-        ._("logs", info.getShortLogLink(), "Link to logs");
-      html._(InfoBlock.class);
+        .__("ContainerID", info.getId())
+        .__("ContainerState", info.getState())
+        .__("ExitStatus", info.getExitStatus())
+        .__("Diagnostics", info.getDiagnostics())
+        .__("User", info.getUser())
+        .__("TotalMemoryNeeded", info.getMemoryNeeded())
+        .__("TotalVCoresNeeded", info.getVCoresNeeded())
+        .__("ExecutionType", info.getExecutionType())
+        .__("logs", info.getShortLogLink(), "Link to logs");
+      html.__(InfoBlock.class);
     }
   }
 }
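
For readers tracking the hunks above: they are mechanical renames from the hamlet package to hamlet2, where every _() text/terminator method becomes __() (a bare _ is no longer a legal identifier as of Java 9, which is presumably what motivated the rename). A minimal sketch of a block written against the renamed API; ExampleBlock is hypothetical, while HtmlBlock and the fluent p()/__() calls are the ones these diffs touch:

  import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

  public class ExampleBlock extends HtmlBlock {
    @Override
    protected void render(Block html) {
      html.p()
          .__("rendered with hamlet2")  // was ._("...") under hamlet
          .__();                        // closes <p>; was ._() under hamlet
    }
  }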




[14/50] [abbrv] hadoop git commit: HADOOP-14578. Bind IPC connections to kerberos UPN host for proxy users. Contributed by Daryn Sharp.

Posted by xg...@apache.org.
HADOOP-14578. Bind IPC connections to kerberos UPN host for proxy users. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27a1a5fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27a1a5fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27a1a5fd

Branch: refs/heads/YARN-5734
Commit: 27a1a5fde94d4d7ea0ed172635c146d594413781
Parents: a92bf39
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Jul 26 13:12:39 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed Jul 26 13:12:39 2017 -0500

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/ipc/Client.java | 34 +++++----
 .../java/org/apache/hadoop/ipc/TestIPC.java     | 76 ++++++++++++++++++++
 2 files changed, 96 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
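
In short: when connecting on behalf of a proxy user, the client now unwraps to the real user's UGI before deriving a bind address from the Kerberos principal, and defers the actual bind to NetUtils.connect(). A sketch of the address selection, assuming a hypothetical chooseBindAddr() helper wrapped around the same SecurityUtil/NetUtils calls the patch uses:

  import java.io.IOException;
  import java.net.InetAddress;
  import java.net.InetSocketAddress;

  import org.apache.hadoop.net.NetUtils;
  import org.apache.hadoop.security.SecurityUtil;
  import org.apache.hadoop.security.UserGroupInformation;

  class BindAddressSketch {
    // Callers unwrap proxy users first: if ticket.getRealUser() != null,
    // pass the real user here, since its UPN carries the service host.
    static InetSocketAddress chooseBindAddr(UserGroupInformation ticket)
        throws IOException {
      if (ticket == null || !ticket.hasKerberosCredentials()) {
        return null;                 // no principal, nothing to bind to
      }
      String host = SecurityUtil.getHostFromPrincipal(ticket.getUserName());
      InetAddress localAddr = NetUtils.getLocalInetAddress(host);
      // Bind only if the principal's host resolves to a local address;
      // port 0 lets the OS pick an ephemeral port.
      return localAddr == null ? null : new InetSocketAddress(localAddr, 0);
    }
  }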


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27a1a5fd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 1daf803..c9ac615 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -633,7 +633,8 @@ public class Client implements AutoCloseable {
       return false;
     }
     
-    private synchronized void setupConnection() throws IOException {
+    private synchronized void setupConnection(
+        UserGroupInformation ticket) throws IOException {
       short ioFailures = 0;
       short timeoutFailures = 0;
       while (true) {
@@ -661,24 +662,26 @@ public class Client implements AutoCloseable {
            * client, to ensure Server matching address of the client connection
            * to host name in principal passed.
            */
-          UserGroupInformation ticket = remoteId.getTicket();
+          InetSocketAddress bindAddr = null;
           if (ticket != null && ticket.hasKerberosCredentials()) {
             KerberosInfo krbInfo = 
               remoteId.getProtocol().getAnnotation(KerberosInfo.class);
-            if (krbInfo != null && krbInfo.clientPrincipal() != null) {
-              String host = 
-                SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName());
-              
+            if (krbInfo != null) {
+              String principal = ticket.getUserName();
+              String host = SecurityUtil.getHostFromPrincipal(principal);
               // If host name is a valid local address then bind socket to it
               InetAddress localAddr = NetUtils.getLocalInetAddress(host);
               if (localAddr != null) {
                 this.socket.setReuseAddress(true);
-                this.socket.bind(new InetSocketAddress(localAddr, 0));
+                if (LOG.isDebugEnabled()) {
+                  LOG.debug("Binding " + principal + " to " + localAddr);
+                }
+                bindAddr = new InetSocketAddress(localAddr, 0);
               }
             }
           }
           
-          NetUtils.connect(this.socket, server, connectionTimeout);
+          NetUtils.connect(this.socket, server, bindAddr, connectionTimeout);
           this.socket.setSoTimeout(soTimeout);
           return;
         } catch (ConnectTimeoutException toe) {
@@ -762,7 +765,14 @@ public class Client implements AutoCloseable {
         AtomicBoolean fallbackToSimpleAuth) {
       if (socket != null || shouldCloseConnection.get()) {
         return;
-      } 
+      }
+      UserGroupInformation ticket = remoteId.getTicket();
+      if (ticket != null) {
+        final UserGroupInformation realUser = ticket.getRealUser();
+        if (realUser != null) {
+          ticket = realUser;
+        }
+      }
       try {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Connecting to "+server);
@@ -774,14 +784,10 @@ public class Client implements AutoCloseable {
         short numRetries = 0;
         Random rand = null;
         while (true) {
-          setupConnection();
+          setupConnection(ticket);
           ipcStreams = new IpcStreams(socket, maxResponseLength);
           writeConnectionHeader(ipcStreams);
           if (authProtocol == AuthProtocol.SASL) {
-            UserGroupInformation ticket = remoteId.getTicket();
-            if (ticket.getRealUser() != null) {
-              ticket = ticket.getRealUser();
-            }
             try {
               authMethod = ticket
                   .doAs(new PrivilegedExceptionAction<AuthMethod>() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27a1a5fd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 3416746..a4577f2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -39,9 +39,12 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
+import java.net.SocketAddress;
+import java.net.SocketException;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -76,6 +79,7 @@ import org.apache.hadoop.ipc.Server.Connection;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -1484,6 +1488,78 @@ public class TestIPC {
     Assert.fail("didn't get limit exceeded");
   }
 
+  @Test
+  public void testUserBinding() throws Exception {
+    checkUserBinding(false);
+  }
+
+  @Test
+  public void testProxyUserBinding() throws Exception {
+    checkUserBinding(true);
+  }
+
+  private void checkUserBinding(boolean asProxy) throws Exception {
+    Socket s;
+    // don't attempt bind with no service host.
+    s = checkConnect(null, asProxy);
+    Mockito.verify(s, Mockito.never()).bind(Mockito.any(SocketAddress.class));
+
+    // don't attempt bind with service host not belonging to this host.
+    s = checkConnect("1.2.3.4", asProxy);
+    Mockito.verify(s, Mockito.never()).bind(Mockito.any(SocketAddress.class));
+
+    // do attempt bind when service host is this host.
+    InetAddress addr = InetAddress.getLocalHost();
+    s = checkConnect(addr.getHostAddress(), asProxy);
+    Mockito.verify(s).bind(new InetSocketAddress(addr, 0));
+  }
+
+  // Dummy protocol that claims to support Kerberos.
+  @KerberosInfo(serverPrincipal = "server@REALM")
+  private static class TestBindingProtocol {
+  }
+
+  private Socket checkConnect(String addr, boolean asProxy) throws Exception {
+    // create a fake ugi that claims to have kerberos credentials.
+    StringBuilder principal = new StringBuilder();
+    principal.append("client");
+    if (addr != null) {
+      principal.append("/").append(addr);
+    }
+    principal.append("@REALM");
+    UserGroupInformation ugi =
+        spy(UserGroupInformation.createRemoteUser(principal.toString()));
+    Mockito.doReturn(true).when(ugi).hasKerberosCredentials();
+    if (asProxy) {
+      ugi = UserGroupInformation.createProxyUser("proxy", ugi);
+    }
+
+    // create a mock socket that throws on connect.
+    SocketException expectedConnectEx =
+        new SocketException("Expected connect failure");
+    Socket s = Mockito.mock(Socket.class);
+    SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+    Mockito.doReturn(s).when(mockFactory).createSocket();
+    doThrow(expectedConnectEx).when(s).connect(
+        Mockito.any(SocketAddress.class), Mockito.anyInt());
+
+    // do a dummy call and expect it to throw an exception on connect.
+    // tests should verify if/how a bind occurred.
+    try (Client client = new Client(LongWritable.class, conf, mockFactory)) {
+      final InetSocketAddress sockAddr = new InetSocketAddress(0);
+      final LongWritable param = new LongWritable(RANDOM.nextLong());
+      final ConnectionId remoteId = new ConnectionId(
+          sockAddr, TestBindingProtocol.class, ugi, 0,
+          RetryPolicies.TRY_ONCE_THEN_FAIL, conf);
+      client.call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, null);
+      fail("call didn't throw connect exception");
+    } catch (SocketException se) {
+      // ipc layer re-wraps exceptions, so check the cause.
+      Assert.assertSame(expectedConnectEx, se.getCause());
+    }
+    return s;
+  }
+
   private void doIpcVersionTest(
       byte[] requestData,
       byte[] expectedResponse) throws IOException {




[33/50] [abbrv] hadoop git commit: HDFS-12044. Mismatch between BlockManager.maxReplicationStreams and ErasureCodingWorker.stripedReconstructionPool pool size causes slow and bursty recovery. (Contributed by Lei (Eddy) Xu)

Posted by xg...@apache.org.
HDFS-12044. Mismatch between BlockManager.maxReplicationStreams and ErasureCodingWorker.stripedReconstructionPool pool size causes slow and bursty recovery. (Contributed by Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77791e4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77791e4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77791e4c

Branch: refs/heads/YARN-5734
Commit: 77791e4c36ddc9305306c83806bf486d4d32575d
Parents: 9ea01fd
Author: Lei Xu <le...@cloudera.com>
Authored: Fri Jul 28 10:49:23 2017 -0700
Committer: Lei Xu <le...@cloudera.com>
Committed: Fri Jul 28 10:50:49 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   | 23 +++++-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 21 ++++++
 .../erasurecode/ErasureCodingWorker.java        | 15 +++-
 .../erasurecode/StripedBlockReconstructor.java  |  3 +-
 .../datanode/erasurecode/StripedReader.java     | 20 ++++++
 .../erasurecode/StripedReconstructionInfo.java  | 15 ++++
 .../erasurecode/StripedReconstructor.java       |  8 ++-
 .../hadoop/hdfs/TestReconstructStripedFile.java | 74 ++++++++++++++++++--
 8 files changed, 169 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
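
The crux of the fix: the reconstruction pool previously sat on a SynchronousQueue, which rejects work once maxPoolSize threads are busy, so a burst of NN tasks was partly thrown away; a LinkedBlockingQueue holds the overflow instead, and charging xmits at enqueue time keeps the NN from over-scheduling. A standalone sketch of the queue behavior (illustrative only, not Hadoop code):

  import java.util.concurrent.LinkedBlockingQueue;
  import java.util.concurrent.RejectedExecutionException;
  import java.util.concurrent.SynchronousQueue;
  import java.util.concurrent.ThreadPoolExecutor;
  import java.util.concurrent.TimeUnit;

  public class QueueChoiceSketch {
    public static void main(String[] args) {
      Runnable slow = () -> {
        try { Thread.sleep(1000); } catch (InterruptedException ignored) { }
      };
      // Pool of at most 2 threads over a SynchronousQueue: a third task
      // finds no idle thread and no queue slot, and is rejected.
      ThreadPoolExecutor rejecting = new ThreadPoolExecutor(
          2, 2, 60, TimeUnit.SECONDS, new SynchronousQueue<>());
      try {
        for (int i = 0; i < 3; i++) {
          rejecting.execute(slow);
        }
      } catch (RejectedExecutionException e) {
        System.out.println("third task rejected by SynchronousQueue pool");
      }
      // Same pool size over a LinkedBlockingQueue: the third task waits.
      ThreadPoolExecutor queueing = new ThreadPoolExecutor(
          2, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
      for (int i = 0; i < 3; i++) {
        queueing.execute(slow);      // never rejected, overflow is queued
      }
      rejecting.shutdown();
      queueing.shutdown();
    }
  }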


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 2e770cc..e7cd0d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -83,6 +83,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -811,10 +812,30 @@ public class DFSUtilClient {
   public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
       int maxPoolSize, long keepAliveTimeSecs, String threadNamePrefix,
       boolean runRejectedExec) {
+    return getThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTimeSecs,
+        new SynchronousQueue<>(), threadNamePrefix, runRejectedExec);
+  }
+
+  /**
+   * Utility to create a {@link ThreadPoolExecutor}.
+   *
+   * @param corePoolSize - min threads in the pool, even if idle
+   * @param maxPoolSize - max threads in the pool
+   * @param keepAliveTimeSecs - max seconds beyond which excess idle threads
+   *        will be terminated
+   * @param queue - the queue to use for holding tasks before they are executed.
+   * @param threadNamePrefix - name prefix for the pool threads
+   * @param runRejectedExec - when true, rejected tasks from
+   *        ThreadPoolExecutor are run in the context of calling thread
+   * @return ThreadPoolExecutor
+   */
+  public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
+      int maxPoolSize, long keepAliveTimeSecs, BlockingQueue<Runnable> queue,
+      String threadNamePrefix, boolean runRejectedExec) {
     Preconditions.checkArgument(corePoolSize > 0);
     ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(corePoolSize,
         maxPoolSize, keepAliveTimeSecs, TimeUnit.SECONDS,
-        new SynchronousQueue<Runnable>(), new Daemon.DaemonFactory() {
+        queue, new Daemon.DaemonFactory() {
           private final AtomicInteger threadIndex = new AtomicInteger(0);
 
           @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 2730393..6069487 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2204,12 +2204,33 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
+   * Increments the xmitsInProgress count by the given value.
+   *
+   * @param delta the amount by which to increase xmitsInProgress.
+   * @see #incrementXmitsInProgress()
+   */
+  public void incrementXmitsInProgress(int delta) {
+    Preconditions.checkArgument(delta >= 0);
+    xmitsInProgress.getAndAdd(delta);
+  }
+
+  /**
    * Decrements the xmitsInProgress count
    */
   public void decrementXmitsInProgress() {
     xmitsInProgress.getAndDecrement();
   }
 
+  /**
+   * Decrements the xmitsInProgress count by the given value.
+   *
+   * @param delta the amount by which to decrease xmitsInProgress.
+   * @see #decrementXmitsInProgress()
+   */
+  public void decrementXmitsInProgress(int delta) {
+    Preconditions.checkArgument(delta >= 0);
+    xmitsInProgress.getAndAdd(-delta);
+  }
+
   private void reportBadBlock(final BPOfferService bpos,
       final ExtendedBlock block, final String msg) {
     FsVolumeSpi volume = getFSDataset().getVolume(block);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index e076dda..72c224f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 
 import java.util.Collection;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -93,7 +94,8 @@ public final class ErasureCodingWorker {
     LOG.debug("Using striped block reconstruction; pool threads={}",
         numThreads);
     stripedReconstructionPool = DFSUtilClient.getThreadPoolExecutor(2,
-        numThreads, 60, "StripedBlockReconstruction-", false);
+        numThreads, 60, new LinkedBlockingQueue<>(),
+        "StripedBlockReconstruction-", false);
     stripedReconstructionPool.allowCoreThreadTimeOut(true);
   }
 
@@ -106,6 +108,7 @@ public final class ErasureCodingWorker {
   public void processErasureCodingTasks(
       Collection<BlockECReconstructionInfo> ecTasks) {
     for (BlockECReconstructionInfo reconInfo : ecTasks) {
+      int xmitsSubmitted = 0;
       try {
         StripedReconstructionInfo stripedReconInfo =
             new StripedReconstructionInfo(
@@ -113,15 +116,25 @@ public final class ErasureCodingWorker {
             reconInfo.getLiveBlockIndices(), reconInfo.getSourceDnInfos(),
             reconInfo.getTargetDnInfos(), reconInfo.getTargetStorageTypes(),
             reconInfo.getTargetStorageIDs());
+        // Constructing the task below may throw IllegalArgumentException
+        // from its StripedReader (task#stripedReader) constructor.
         final StripedBlockReconstructor task =
             new StripedBlockReconstructor(this, stripedReconInfo);
         if (task.hasValidTargets()) {
+          // See HDFS-12044. We increase xmitsInProgress even if the task is
+          // only enqueued, so that
+          //   1) NN will not send more tasks than what DN can execute and
+          //   2) DN will not throw away reconstruction tasks, and instead
+          //      keeps them in the executor's unbounded task queue.
+          xmitsSubmitted = task.getXmits();
+          getDatanode().incrementXmitsInProgress(xmitsSubmitted);
           stripedReconstructionPool.submit(task);
         } else {
           LOG.warn("No missing internal block. Skip reconstruction for task:{}",
               reconInfo);
         }
       } catch (Throwable e) {
+        getDatanode().decrementXmitsInProgress(xmitsSubmitted);
         LOG.warn("Failed to reconstruct striped block {}",
             reconInfo.getExtendedBlock().getLocalBlock(), e);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index 1119bbb..bac013a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -48,7 +48,6 @@ class StripedBlockReconstructor extends StripedReconstructor
 
   @Override
   public void run() {
-    getDatanode().incrementXmitsInProgress();
     try {
       initDecoderIfNecessary();
 
@@ -66,7 +65,7 @@ class StripedBlockReconstructor extends StripedReconstructor
       LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e);
       getDatanode().getMetrics().incrECFailedReconstructionTasks();
     } finally {
-      getDatanode().decrementXmitsInProgress();
+      getDatanode().decrementXmitsInProgress(getXmits());
       final DataNodeMetrics metrics = getDatanode().getMetrics();
       metrics.incrECReconstructionTasks();
       metrics.incrECReconstructionBytesRead(getBytesRead());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java
index f6f343a..96f9791 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java
@@ -68,6 +68,8 @@ class StripedReader {
   private int[] successList;
 
   private final int minRequiredSources;
+  // The number of xmits used by the reconstruction task.
+  private final int xmits;
   // The buffers and indices for striped blocks whose length is 0
   private ByteBuffer[] zeroStripeBuffers;
   private short[] zeroStripeIndices;
@@ -107,6 +109,12 @@ class StripedReader {
       zeroStripeIndices = new short[zeroStripNum];
     }
 
+    // The xmits count is the larger of the number of source connections
+    // and the number of target connections this task will open.
+    xmits = Math.max(minRequiredSources,
+        stripedReconInfo.getTargets() != null ?
+        stripedReconInfo.getTargets().length : 0);
+
     this.liveIndices = stripedReconInfo.getLiveIndices();
     assert liveIndices != null;
     this.sources = stripedReconInfo.getSources();
@@ -472,4 +480,16 @@ class StripedReader {
   CachingStrategy getCachingStrategy() {
     return reconstructor.getCachingStrategy();
   }
+
+  /**
+   * Return the xmits of this EC reconstruction task.
+   * <p>
+   * DN uses it to coordinate with NN to adjust the speed of scheduling the
+   * EC reconstruction tasks to this DN.
+   *
+   * @return the xmits of this reconstruction task.
+   */
+  int getXmits() {
+    return xmits;
+  }
 }
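
A worked example of the xmits accounting above, assuming an RS(6,3) block group with one lost internal block and one reconstruction target: minRequiredSources is 6 and targets.length is 1, so getXmits() reports max(6, 1) = 6, and the DataNode holds those 6 xmits from the moment the task is enqueued until it finishes.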

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
index a619c34..0a3e125 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
@@ -103,5 +103,20 @@ public class StripedReconstructionInfo {
   String[] getTargetStorageIds() {
     return targetStorageIds;
   }
+
+  /**
+   * Return the weight of this EC reconstruction task.
+   *
+   * DN uses it to coordinate with NN to adjust the speed of scheduling
+   * reconstruction tasks to this DN.
+   *
+   * @return the weight of this reconstruction task.
+   * @see HDFS-12044
+   */
+  int getWeight() {
+    // See HDFS-12044. The weight of a RS(n, k) is calculated by the network
+    // connections it opens.
+    return sources.length + targets.length;
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
index b8433c7..3202121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
@@ -133,7 +133,6 @@ abstract class StripedReconstructor {
     }
     blockGroup = stripedReconInfo.getBlockGroup();
     stripedReader = new StripedReader(this, datanode, conf, stripedReconInfo);
-
     cachingStrategy = CachingStrategy.newDefaultStrategy();
 
     positionInBlock = 0L;
@@ -233,6 +232,13 @@ abstract class StripedReconstructor {
     return blockGroup;
   }
 
+  /**
+   * Get the xmits that _will_ be used for this reconstruction task.
+   */
+  int getXmits() {
+    return stripedReader.getXmits();
+  }
+
   BitSet getLiveBitSet() {
     return liveBitSet;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 29e4028..7cd34c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.BitSet;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -44,6 +45,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
@@ -81,6 +83,7 @@ public class TestReconstructStripedFile {
     Any
   }
 
+  private Configuration conf;
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   // Map: DatanodeID -> datanode index in cluster
@@ -89,7 +92,7 @@ public class TestReconstructStripedFile {
 
   @Before
   public void setup() throws IOException {
-    final Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
@@ -263,6 +266,14 @@ public class TestReconstructStripedFile {
     return stoppedDNs;
   }
 
+  private static void writeFile(DistributedFileSystem fs, String fileName,
+      int fileLen) throws Exception {
+    final byte[] data = new byte[fileLen];
+    Arrays.fill(data, (byte) 1);
+    DFSTestUtil.writeFile(fs, new Path(fileName), data);
+    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);
+  }
+
   /**
    * Test the file blocks reconstruction.
    * 1. Check the replica is reconstructed in the target datanode,
@@ -278,10 +289,7 @@ public class TestReconstructStripedFile {
 
     Path file = new Path(fileName);
 
-    final byte[] data = new byte[fileLen];
-    Arrays.fill(data, (byte) 1);
-    DFSTestUtil.writeFile(fs, file, data);
-    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);
+    writeFile(fs, fileName, fileLen);
 
     LocatedBlocks locatedBlocks =
         StripedFileTestUtil.getLocatedBlocks(file, fs);
@@ -424,4 +432,60 @@ public class TestReconstructStripedFile {
     ecTasks.add(invalidECInfo);
     dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
   }
+
+  // HDFS-12044
+  @Test(timeout = 60000)
+  public void testNNSendsErasureCodingTasks() throws Exception {
+    testNNSendsErasureCodingTasks(1);
+    testNNSendsErasureCodingTasks(2);
+  }
+
+  private void testNNSendsErasureCodingTasks(int deadDN) throws Exception {
+    cluster.shutdown();
+
+    final int numDataNodes = dnNum + 1;
+    conf.setInt(
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 10);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 20);
+    conf.setInt(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_BLK_THREADS_KEY,
+        2);
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDataNodes).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
+    fs.getClient().setErasureCodingPolicy("/", policy.getName());
+
+    final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
+    for (int i = 0; i < 100; i++) {
+      writeFile(fs, "/ec-file-" + i, fileLen);
+    }
+
+    // Inject data loss by tearing down the desired number of DataNodes.
+    assertTrue(policy.getNumParityUnits() >= deadDN);
+    List<DataNode> dataNodes = new ArrayList<>(cluster.getDataNodes());
+    Collections.shuffle(dataNodes);
+    for (DataNode dn : dataNodes.subList(0, deadDN)) {
+      shutdownDataNode(dn);
+    }
+
+    final FSNamesystem ns = cluster.getNamesystem();
+    GenericTestUtils.waitFor(() -> ns.getPendingDeletionBlocks() == 0,
+        500, 30000);
+
+    // Make sure that all pending reconstruction tasks can be processed.
+    while (ns.getPendingReconstructionBlocks() > 0) {
+      long timeoutPending = ns.getNumTimedOutPendingReconstructions();
+      assertTrue(String.format("Found %d timeout pending reconstruction tasks",
+          timeoutPending), timeoutPending == 0);
+      Thread.sleep(1000);
+    }
+
+    // Verify all DN reaches zero xmitsInProgress.
+    GenericTestUtils.waitFor(() ->
+        cluster.getDataNodes().stream().mapToInt(
+            DataNode::getXmitsInProgress).sum() == 0,
+        500, 30000
+    );
+  }
 }




[47/50] [abbrv] hadoop git commit: YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda)

Posted by xg...@apache.org.
YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/342bea02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/342bea02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/342bea02

Branch: refs/heads/YARN-5734
Commit: 342bea02aa582cfea509537829e7feb1c914ab9d
Parents: ac11f49
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Apr 3 10:12:01 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 31 08:58:43 2017 -0700

----------------------------------------------------------------------
 .../scheduler/MutableConfScheduler.java         |  40 ++
 .../scheduler/MutableConfigurationProvider.java |   5 +-
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../conf/InMemoryConfigurationStore.java        |   6 +-
 .../conf/MutableCSConfigurationProvider.java    |  24 +-
 .../resourcemanager/webapp/RMWebServices.java   | 172 ++++++-
 .../webapp/dao/QueueConfigInfo.java             |  57 +++
 .../webapp/dao/QueueConfigsUpdateInfo.java      |  60 +++
 .../TestMutableCSConfigurationProvider.java     |   6 +-
 .../TestRMWebServicesConfigurationMutation.java | 477 +++++++++++++++++++
 10 files changed, 851 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
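
The moving parts, in brief: an admin PUTs a QueueConfigsUpdateInfo payload to RMWebServices, which flattens the add/update/remove requests into a key-value confUpdate map (null meaning unset) and hands it to the scheduler; MutableCSConfigurationProvider then logs the mutation, applies it, and reinitializes the scheduler, restoring the old configuration if reinitialization fails. A minimal sketch of that log/apply/confirm-or-rollback pattern, with Store and Scheduler as hypothetical stand-ins for the patch's YarnConfigurationStore and ResourceScheduler:

  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Map;

  public class MutationSketch {
    interface Store {
      long log(Map<String, String> update);
      void confirm(long id, boolean ok);
    }
    interface Scheduler {
      void reinitialize(Map<String, String> conf) throws IOException;
    }

    static void mutate(Store store, Scheduler sched,
        Map<String, String> conf, Map<String, String> update)
        throws IOException {
      Map<String, String> old = new HashMap<>(conf);  // rollback copy
      long id = store.log(update);                    // 1. persist intent
      for (Map.Entry<String, String> kv : update.entrySet()) {
        if (kv.getValue() == null) {
          conf.remove(kv.getKey());                   // null means unset
        } else {
          conf.put(kv.getKey(), kv.getValue());
        }
      }
      try {
        sched.reinitialize(conf);                     // 2. validate by reinit
      } catch (IOException e) {
        conf.clear();
        conf.putAll(old);                             // 3a. roll back
        store.confirm(id, false);
        throw e;
      }
      store.confirm(id, true);                        // 3b. commit
    }
  }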


http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
new file mode 100644
index 0000000..35e36e1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Interface for a scheduler that supports changing configuration at runtime.
+ */
+public interface MutableConfScheduler extends ResourceScheduler {
+
+  /**
+   * Update the scheduler's configuration.
+   * @param user Caller of this update
+   * @param confUpdate key-value map of the configuration update
+   * @throws IOException if update is invalid
+   */
+  void updateConfiguration(UserGroupInformation user,
+      Map<String, String> confUpdate) throws IOException;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index da30a2b..889c3bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import java.io.IOException;
 import java.util.Map;
 
 /**
@@ -29,7 +30,9 @@ public interface MutableConfigurationProvider {
    * Update the scheduler configuration with the provided key value pairs.
    * @param user User issuing the request
    * @param confUpdate Key-value pairs for configurations to be updated.
+   * @throws IOException if scheduler could not be reinitialized
    */
-  void mutateConfiguration(String user, Map<String, String> confUpdate);
+  void mutateConfiguration(String user, Map<String, String> confUpdate)
+      throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index ca6e872..ac1748a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -86,6 +86,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnSched
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
@@ -150,7 +152,7 @@ import com.google.common.util.concurrent.SettableFuture;
 public class CapacityScheduler extends
     AbstractYarnScheduler<FiCaSchedulerApp, FiCaSchedulerNode> implements
     PreemptableResourceScheduler, CapacitySchedulerContext, Configurable,
-    ResourceAllocationCommitter {
+    ResourceAllocationCommitter, MutableConfScheduler {
 
   private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
 
@@ -2512,4 +2514,16 @@ public class CapacityScheduler extends
       writeLock.unlock();
     }
   }
+
+  @Override
+  public void updateConfiguration(UserGroupInformation user,
+      Map<String, String> confUpdate) throws IOException {
+    if (csConfProvider instanceof MutableConfigurationProvider) {
+      ((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
+          user.getShortUserName(), confUpdate);
+    } else {
+      throw new UnsupportedOperationException("Configured CS configuration " +
+          "provider does not support updating configuration.");
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
index a208fb9..b97be1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -58,7 +58,11 @@ public class InMemoryConfigurationStore implements YarnConfigurationStore {
         if (isValid) {
           Map<String, String> mutations = mutation.getUpdates();
           for (Map.Entry<String, String> kv : mutations.entrySet()) {
-            schedConf.set(kv.getKey(), kv.getValue());
+            if (kv.getValue() == null) {
+              schedConf.unset(kv.getKey());
+            } else {
+              schedConf.set(kv.getKey(), kv.getValue());
+            }
           }
         }
         return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 267ab6a..ea1b3c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -60,34 +60,44 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
     }
     Configuration initialSchedConf = new Configuration(false);
     initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
-    this.schedConf = initialSchedConf;
-    confStore.initialize(config, initialSchedConf);
+    this.schedConf = new Configuration(false);
+    // We need to explicitly set the key-values in schedConf, otherwise
+    // these configuration keys cannot be deleted when
+    // configuration is reloaded.
+    for (Map.Entry<String, String> kv : initialSchedConf) {
+      schedConf.set(kv.getKey(), kv.getValue());
+    }
+    confStore.initialize(config, schedConf);
     this.conf = config;
   }
 
   @Override
   public CapacitySchedulerConfiguration loadConfiguration(Configuration
       configuration) throws IOException {
-    Configuration loadedConf = new Configuration(configuration);
-    loadedConf.addResource(schedConf);
+    Configuration loadedConf = new Configuration(schedConf);
+    loadedConf.addResource(configuration);
     return new CapacitySchedulerConfiguration(loadedConf, false);
   }
 
   @Override
   public void mutateConfiguration(String user,
-      Map<String, String> confUpdate) {
+      Map<String, String> confUpdate) throws IOException {
     Configuration oldConf = new Configuration(schedConf);
     LogMutation log = new LogMutation(confUpdate, user);
     long id = confStore.logMutation(log);
     for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
-      schedConf.set(kv.getKey(), kv.getValue());
+      if (kv.getValue() == null) {
+        schedConf.unset(kv.getKey());
+      } else {
+        schedConf.set(kv.getKey(), kv.getValue());
+      }
     }
     try {
       rmContext.getScheduler().reinitialize(conf, rmContext);
     } catch (IOException e) {
       schedConf = oldConf;
       confStore.confirmMutation(id, false);
-      return;
+      throw e;
     }
     confStore.confirmMutation(id, true);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index c537b7e..56a0bf8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -55,7 +55,8 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
-
+import com.google.common.base.Joiner;
+import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -127,11 +128,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
@@ -2404,4 +2408,170 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
         app.getApplicationTimeouts().get(appTimeout.getTimeoutType()));
     return Response.status(Status.OK).entity(timeout).build();
   }
+
+  @PUT
+  @Path("/queues")
+  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+      MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public Response updateSchedulerConfiguration(QueueConfigsUpdateInfo
+      mutationInfo, @Context HttpServletRequest hsr)
+      throws AuthorizationException, InterruptedException {
+    init();
+
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
+    if (aclsManager.areACLsEnabled()) {
+      if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
+        String msg = "Only admins can carry out this operation.";
+        throw new ForbiddenException(msg);
+      }
+    }
+
+    ResourceScheduler scheduler = rm.getResourceScheduler();
+    if (scheduler instanceof MutableConfScheduler) {
+      try {
+        callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws IOException, YarnException {
+            Map<String, String> confUpdate =
+                constructKeyValueConfUpdate(mutationInfo);
+            ((MutableConfScheduler) scheduler).updateConfiguration(callerUGI,
+                confUpdate);
+            return null;
+          }
+        });
+      } catch (IOException e) {
+        return Response.status(Status.BAD_REQUEST).entity(e.getMessage())
+            .build();
+      }
+      return Response.status(Status.OK).entity("Configuration change " +
+          "successfully applied.").build();
+    } else {
+      return Response.status(Status.BAD_REQUEST)
+          .entity("Configuration change only supported by CapacityScheduler.")
+          .build();
+    }
+  }
+
+  private Map<String, String> constructKeyValueConfUpdate(
+      QueueConfigsUpdateInfo mutationInfo) throws IOException {
+    CapacitySchedulerConfiguration currentConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    CapacitySchedulerConfiguration proposedConf =
+        new CapacitySchedulerConfiguration(currentConf, false);
+    Map<String, String> confUpdate = new HashMap<>();
+    for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
+      removeQueue(queueToRemove, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
+      addQueue(addQueueInfo, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
+      updateQueue(updateQueueInfo, proposedConf, confUpdate);
+    }
+    return confUpdate;
+  }
+
+  private void removeQueue(
+      String queueToRemove, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (queueToRemove == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+      String queueName = queueToRemove.substring(
+          queueToRemove.lastIndexOf('.') + 1);
+      CSQueue queue = cs.getQueue(queueName);
+      if (queue == null ||
+          !queue.getQueuePath().equals(queueToRemove)) {
+        throw new IOException("Queue " + queueToRemove + " not found");
+      } else if (queueToRemove.lastIndexOf('.') == -1) {
+        throw new IOException("Can't remove queue " + queueToRemove);
+      }
+      String parentQueuePath = queueToRemove.substring(0, queueToRemove
+          .lastIndexOf('.'));
+      String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
+      List<String> newSiblingQueues = new ArrayList<>();
+      for (String siblingQueue : siblingQueues) {
+        if (!siblingQueue.equals(queueName)) {
+          newSiblingQueues.add(siblingQueue);
+        }
+      }
+      proposedConf.setQueues(parentQueuePath, newSiblingQueues
+          .toArray(new String[0]));
+      String queuesConfig = CapacitySchedulerConfiguration.PREFIX +
+          parentQueuePath + CapacitySchedulerConfiguration.DOT +
+          CapacitySchedulerConfiguration.QUEUES;
+      if (newSiblingQueues.size() == 0) {
+        confUpdate.put(queuesConfig, null);
+      } else {
+        confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
+      }
+      for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
+          ".*" + queueToRemove.replaceAll("\\.", "\\.") + "\\..*")
+          .entrySet()) {
+        proposedConf.unset(confRemove.getKey());
+        confUpdate.put(confRemove.getKey(), null);
+      }
+    }
+  }
+
+  private void addQueue(
+      QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (addInfo == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+      String queuePath = addInfo.getQueue();
+      String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
+      if (cs.getQueue(queueName) != null) {
+        throw new IOException("Can't add existing queue " + queuePath);
+      } else if (queuePath.lastIndexOf('.') == -1) {
+        throw new IOException("Can't add invalid queue " + queuePath);
+      }
+      String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
+      String[] siblings = proposedConf.getQueues(parentQueue);
+      List<String> siblingQueues = siblings == null ? new ArrayList<>() :
+          new ArrayList<>(Arrays.<String>asList(siblings));
+      siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
+      proposedConf.setQueues(parentQueue,
+          siblingQueues.toArray(new String[0]));
+      confUpdate.put(CapacitySchedulerConfiguration.PREFIX +
+          parentQueue + CapacitySchedulerConfiguration.DOT +
+          CapacitySchedulerConfiguration.QUEUES,
+          Joiner.on(',').join(siblingQueues));
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
+          queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
+
+  private void updateQueue(QueueConfigInfo updateInfo,
+      CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) {
+    if (updateInfo == null) {
+      return;
+    } else {
+      String queuePath = updateInfo.getQueue();
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
+          queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
 }

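For a concrete sense of what removeQueue() produces, consider deleting a leaf
queue. A sketch, assuming the yarn.scheduler.capacity prefix used by
CapacitySchedulerConfiguration and a parent root.a with children a1 and a2 (the
layout the tests below use):

    // Hypothetical input: remove root.a.a2.
    QueueConfigsUpdateInfo update = new QueueConfigsUpdateInfo();
    update.getRemoveQueueInfo().add("root.a.a2");

    // removeQueue() would then populate confUpdate roughly as:
    //   "yarn.scheduler.capacity.root.a.queues"      -> "a1"  (a2 dropped)
    //   "yarn.scheduler.capacity.root.a.a2.capacity" -> null  (every
    //                                        root.a.a2.* key is cleared)
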
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
new file mode 100644
index 0000000..b20eda6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for adding a queue to, or updating a queue in, the scheduler
+ * configuration.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigInfo {
+
+  @XmlElement(name = "queueName")
+  private String queue;
+
+  private HashMap<String, String> params = new HashMap<>();
+
+  public QueueConfigInfo() { }
+
+  public QueueConfigInfo(String queue, Map<String, String> params) {
+    this.queue = queue;
+    this.params = new HashMap<>(params);
+  }
+
+  public String getQueue() {
+    return this.queue;
+  }
+
+  public HashMap<String, String> getParams() {
+    return this.params;
+  }
+
+}
\ No newline at end of file

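As a usage sketch, callers build one QueueConfigInfo per queue being added or
updated; the params map holds bare property names such as capacity or state,
which the provider later expands with the full per-queue key prefix:

    // A minimal sketch: request that root.b's capacity become 50.
    Map<String, String> params = new HashMap<>();
    params.put(CapacitySchedulerConfiguration.CAPACITY, "50");
    QueueConfigInfo bUpdate = new QueueConfigInfo("root.b", params);
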
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
new file mode 100644
index 0000000..644ec90
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for making scheduler configuration changes (supports adding,
+ * removing, or updating a queue).
+ */
+@XmlRootElement(name = "schedConf")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigsUpdateInfo {
+
+  @XmlElement(name = "add")
+  private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
+
+  @XmlElement(name = "remove")
+  private ArrayList<String> removeQueueInfo = new ArrayList<>();
+
+  @XmlElement(name = "update")
+  private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
+
+  public QueueConfigsUpdateInfo() {
+    // JAXB needs this
+  }
+
+  public ArrayList<QueueConfigInfo> getAddQueueInfo() {
+    return addQueueInfo;
+  }
+
+  public ArrayList<String> getRemoveQueueInfo() {
+    return removeQueueInfo;
+  }
+
+  public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
+    return updateQueueInfo;
+  }
+}

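A sketch of assembling a complete mutation request from these two DAOs,
mirroring the REST tests later in this patch (queue names are illustrative):

    QueueConfigsUpdateInfo update = new QueueConfigsUpdateInfo();

    // Remove one queue, add another, and update a third in a single request.
    update.getRemoveQueueInfo().add("root.c");

    Map<String, String> dCapacity = new HashMap<>();
    dCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
    update.getAddQueueInfo().add(new QueueConfigInfo("root.d", dCapacity));

    Map<String, String> bCapacity = new HashMap<>();
    bCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "75");
    update.getUpdateQueueInfo().add(new QueueConfigInfo("root.b", bCapacity));

    // When marshalled, the payload is rooted at "schedConf" with "add",
    // "remove" and "update" elements, per the JAXB annotations above.
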
http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 3f103b1..254da31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -77,7 +77,11 @@ public class TestMutableCSConfigurationProvider {
     assertNull(confProvider.loadConfiguration(conf).get("badKey"));
     doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
         any(RMContext.class));
-    confProvider.mutateConfiguration(TEST_USER, badUpdate);
+    try {
+      confProvider.mutateConfiguration(TEST_USER, badUpdate);
+    } catch (IOException e) {
+      // Expected exception.
+    }
     assertNull(confProvider.loadConfiguration(conf).get("badKey"));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/342bea02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
new file mode 100644
index 0000000..d149055
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -0,0 +1,477 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response.Status;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Test scheduler configuration mutation via REST API.
+ */
+public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
+
+  private static final File CONF_FILE = new File(new File("target",
+      "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
+  private static final File OLD_CONF_FILE = new File(new File("target",
+      "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE + ".tmp");
+
+  private static MockRM rm;
+  private static String userName;
+  private static CapacitySchedulerConfiguration csConf;
+  private static YarnConfiguration conf;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      try {
+        userName = UserGroupInformation.getCurrentUser().getShortUserName();
+      } catch (IOException ioe) {
+        throw new RuntimeException("Unable to get current user name "
+            + ioe.getMessage(), ioe);
+      }
+      csConf = new CapacitySchedulerConfiguration(new Configuration(false),
+          false);
+      setupQueueConfiguration(csConf);
+      conf = new YarnConfiguration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+          ResourceScheduler.class);
+      conf.set(CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+          CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER);
+      conf.set(YarnConfiguration.YARN_ADMIN_ACL, userName);
+      try {
+        if (CONF_FILE.exists()) {
+          if (!CONF_FILE.renameTo(OLD_CONF_FILE)) {
+            throw new RuntimeException("Failed to rename conf file");
+          }
+        }
+        try (FileOutputStream out = new FileOutputStream(CONF_FILE)) {
+          csConf.writeXml(out);
+        }
+      } catch (IOException e) {
+        throw new RuntimeException("Failed to write XML file", e);
+      }
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+      filter("/*").through(TestRMWebServicesAppsModification
+          .TestRMCustomAuthFilter.class);
+    }
+  }
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    GuiceServletConfig.setInjector(
+        Guice.createInjector(new WebServletModule()));
+  }
+
+  private static void setupQueueConfiguration(
+      CapacitySchedulerConfiguration config) {
+    config.setQueues(CapacitySchedulerConfiguration.ROOT,
+        new String[]{"a", "b", "c"});
+
+    final String a = CapacitySchedulerConfiguration.ROOT + ".a";
+    config.setCapacity(a, 25f);
+    config.setMaximumCapacity(a, 50f);
+
+    final String a1 = a + ".a1";
+    final String a2 = a + ".a2";
+    config.setQueues(a, new String[]{"a1", "a2"});
+    config.setCapacity(a1, 100f);
+    config.setCapacity(a2, 0f);
+
+    final String b = CapacitySchedulerConfiguration.ROOT + ".b";
+    config.setCapacity(b, 75f);
+
+    final String c = CapacitySchedulerConfiguration.ROOT + ".c";
+    config.setCapacity(c, 0f);
+
+    final String c1 = c + ".c1";
+    config.setQueues(c, new String[] {"c1"});
+    config.setCapacity(c1, 0f);
+  }
+
+  public TestRMWebServicesConfigurationMutation() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testAddNestedQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Add parent queue root.d with two children d1 and d2.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> d1Capacity = new HashMap<>();
+    d1Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
+    d1Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "25");
+    Map<String, String> nearEmptyCapacity = new HashMap<>();
+    nearEmptyCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "1E-4");
+    nearEmptyCapacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY,
+        "1E-4");
+    Map<String, String> d2Capacity = new HashMap<>();
+    d2Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "75");
+    d2Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "75");
+    QueueConfigInfo d1 = new QueueConfigInfo("root.d.d1", d1Capacity);
+    QueueConfigInfo d2 = new QueueConfigInfo("root.d.d2", d2Capacity);
+    QueueConfigInfo d = new QueueConfigInfo("root.d", nearEmptyCapacity);
+    updateInfo.getAddQueueInfo().add(d1);
+    updateInfo.getAddQueueInfo().add(d2);
+    updateInfo.getAddQueueInfo().add(d);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(4, newCSConf.getQueues("root").length);
+    assertEquals(2, newCSConf.getQueues("root.d").length);
+    assertEquals(25.0f, newCSConf.getNonLabeledQueueCapacity("root.d.d1"),
+        0.01f);
+    assertEquals(75.0f, newCSConf.getNonLabeledQueueCapacity("root.d.d2"),
+        0.01f);
+  }
+
+  @Test
+  public void testAddWithUpdate() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Add root.d with capacity 25, reducing root.b capacity from 75 to 50.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> dCapacity = new HashMap<>();
+    dCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
+    Map<String, String> bCapacity = new HashMap<>();
+    bCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "50");
+    QueueConfigInfo d = new QueueConfigInfo("root.d", dCapacity);
+    QueueConfigInfo b = new QueueConfigInfo("root.b", bCapacity);
+    updateInfo.getAddQueueInfo().add(d);
+    updateInfo.getUpdateQueueInfo().add(b);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(4, newCSConf.getQueues("root").length);
+    assertEquals(25.0f, newCSConf.getNonLabeledQueueCapacity("root.d"), 0.01f);
+    assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
+  }
+
+  @Test
+  public void testRemoveQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.a.a2");
+    // Remove root.a.a2
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.a.a2");
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(1, newCSConf.getQueues("root.a").length);
+    assertEquals("a1", newCSConf.getQueues("root.a")[0]);
+  }
+
+  @Test
+  public void testRemoveParentQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.c", "root.c.c1");
+    // Remove root.c (parent queue)
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.c");
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(2, newCSConf.getQueues("root").length);
+    assertNull(newCSConf.getQueues("root.c"));
+  }
+
+  @Test
+  public void testRemoveParentQueueWithCapacity() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.a", "root.a.a1", "root.a.a2");
+    // Remove root.a (parent queue) with capacity 25
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.a");
+
+    // Set root.b capacity to 100
+    Map<String, String> bCapacity = new HashMap<>();
+    bCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "100");
+    QueueConfigInfo b = new QueueConfigInfo("root.b", bCapacity);
+    updateInfo.getUpdateQueueInfo().add(b);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(2, newCSConf.getQueues("root").length);
+    assertEquals(100.0f, newCSConf.getNonLabeledQueueCapacity("root.b"),
+        0.01f);
+  }
+
+  @Test
+  public void testRemoveMultipleQueues() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    stopQueue("root.b", "root.c", "root.c.c1");
+    // Remove root.b and root.c
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.b");
+    updateInfo.getRemoveQueueInfo().add("root.c");
+    Map<String, String> aCapacity = new HashMap<>();
+    aCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "100");
+    aCapacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "100");
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.a", aCapacity);
+    updateInfo.getUpdateQueueInfo().add(configInfo);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(1, newCSConf.getQueues("root").length);
+  }
+
+  private void stopQueue(String... queuePaths) throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Set state of queues to STOPPED.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> stoppedParam = new HashMap<>();
+    stoppedParam.put(CapacitySchedulerConfiguration.STATE,
+        QueueState.STOPPED.toString());
+    for (String queue : queuePaths) {
+      QueueConfigInfo stoppedInfo = new QueueConfigInfo(queue, stoppedParam);
+      updateInfo.getUpdateQueueInfo().add(stoppedInfo);
+    }
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    for (String queue : queuePaths) {
+      assertEquals(QueueState.STOPPED, newCSConf.getState(queue));
+    }
+  }
+
+  @Test
+  public void testUpdateQueue() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Update config value.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> updateParam = new HashMap<>();
+    updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
+        "0.2");
+    QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+    updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+
+    assertEquals(CapacitySchedulerConfiguration
+            .DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,
+        cs.getConfiguration()
+            .getMaximumApplicationMasterResourcePerQueuePercent("root.a"),
+        0.001f);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
+    assertEquals(0.2f, newCSConf
+        .getMaximumApplicationMasterResourcePerQueuePercent("root.a"), 0.001f);
+
+    // Remove config. Config value should be reverted to default.
+    updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
+        null);
+    aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+    updateInfo.getUpdateQueueInfo().clear();
+    updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    newCSConf = cs.getConfiguration();
+    assertEquals(CapacitySchedulerConfiguration
+        .DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT, newCSConf
+            .getMaximumApplicationMasterResourcePerQueuePercent("root.a"),
+        0.001f);
+  }
+
+  @Test
+  public void testUpdateQueueCapacity() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Update root.a and root.b capacity to 50.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    Map<String, String> updateParam = new HashMap<>();
+    updateParam.put(CapacitySchedulerConfiguration.CAPACITY, "50");
+    QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+    QueueConfigInfo bUpdateInfo = new QueueConfigInfo("root.b", updateParam);
+    updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+    updateInfo.getUpdateQueueInfo().add(bUpdateInfo);
+
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("queues").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.a"), 0.01f);
+    assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
+  }
+
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    if (rm != null) {
+      rm.stop();
+    }
+    CONF_FILE.delete();
+    if (OLD_CONF_FILE.exists() && !OLD_CONF_FILE.renameTo(CONF_FILE)) {
+      throw new RuntimeException("Failed to restore old configuration file");
+    }
+    super.tearDown();
+  }
+
+  @SuppressWarnings("rawtypes")
+  private String toJson(Object obj, Class klass) throws Exception {
+    StringWriter sw = new StringWriter();
+    JSONJAXBContext ctx = new JSONJAXBContext(klass);
+    JSONMarshaller jm = ctx.createJSONMarshaller();
+    jm.marshallToJSON(obj, sw);
+    return sw.toString();
+  }
+}




[48/50] [abbrv] hadoop git commit: YARN-5949. Add pluggable configuration ACL policy interface and implementation. (Jonathan Hung via wangda)

Posted by xg...@apache.org.
YARN-5949. Add pluggable configuration ACL policy interface and implementation. (Jonathan Hung via wangda)

Change-Id: Ib98e82ff753bede21fcab2e6ca9ec1e7a5a2008f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbaa3456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbaa3456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbaa3456

Branch: refs/heads/YARN-5734
Commit: fbaa34569ff596f944b1f7f812ac2dcdd987bcf9
Parents: 342bea0
Author: Wangda Tan <wa...@apache.org>
Authored: Mon May 22 13:38:31 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 31 08:59:23 2017 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   3 +
 .../src/main/resources/yarn-default.xml         |  11 ++
 .../ConfigurationMutationACLPolicy.java         |  47 ++++++
 .../ConfigurationMutationACLPolicyFactory.java  |  49 ++++++
 .../DefaultConfigurationMutationACLPolicy.java  |  45 ++++++
 .../scheduler/MutableConfScheduler.java         |  19 ++-
 .../scheduler/MutableConfigurationProvider.java |   8 +-
 .../scheduler/capacity/CapacityScheduler.java   |   6 +-
 .../conf/MutableCSConfigurationProvider.java    | 151 +++++++++++++++++-
 ...ueueAdminConfigurationMutationACLPolicy.java |  96 ++++++++++++
 .../resourcemanager/webapp/RMWebServices.java   | 131 +---------------
 .../TestConfigurationMutationACLPolicies.java   | 154 +++++++++++++++++++
 .../TestMutableCSConfigurationProvider.java     |  40 +++--
 13 files changed, 610 insertions(+), 150 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ce413f6..01db626 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -627,6 +627,9 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_CONFIGURATION_STORE =
       MEMORY_CONFIGURATION_STORE;
 
+  public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
+      YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
       + "authorization-provider";
   private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 74ff747..a0bed5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3148,4 +3148,15 @@
     <value>memory</value>
   </property>
 
+  <property>
+    <description>
+      The class to use for the configuration mutation ACL policy when a
+      mutable configuration provider is in use. It controls whether a mutation
+      request is allowed. The DefaultConfigurationMutationACLPolicy checks
+      whether the requestor is a YARN admin.
+    </description>
+    <name>yarn.scheduler.configuration.mutation.acl-policy.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.DefaultConfigurationMutationACLPolicy</value>
+  </property>
+
 </configuration>

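A sketch of selecting a non-default policy programmatically; the queue-admin
implementation named here is the one added at the end of this patch:

    Configuration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity"
            + ".conf.QueueAdminConfigurationMutationACLPolicy");
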
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..724487b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+/**
+ * Interface for determining whether configuration mutations are allowed.
+ */
+public interface ConfigurationMutationACLPolicy {
+
+  /**
+   * Initialize ACL policy with configuration and RMContext.
+   * @param conf Configuration to initialize with.
+   * @param rmContext the RM context
+   */
+  void init(Configuration conf, RMContext rmContext);
+
+  /**
+   * Check if mutation is allowed.
+   * @param user User issuing the request
+   * @param confUpdate configurations to be updated
+   * @return whether the provided mutation is allowed
+   */
+  boolean isMutationAllowed(UserGroupInformation user, QueueConfigsUpdateInfo
+      confUpdate);
+
+}

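To illustrate the contract, a minimal hypothetical implementation that only
permits a single configured user (the property name is invented for the
example; imports are the same as in the interface above):

    public class SingleUserMutationACLPolicy
        implements ConfigurationMutationACLPolicy {

      private String allowedUser;

      @Override
      public void init(Configuration conf, RMContext rmContext) {
        // Hypothetical property, not part of this patch.
        allowedUser = conf.get(
            "yarn.scheduler.configuration.mutation.allowed-user", "admin");
      }

      @Override
      public boolean isMutationAllowed(UserGroupInformation user,
          QueueConfigsUpdateInfo confUpdate) {
        return allowedUser.equals(user.getShortUserName());
      }
    }
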
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
new file mode 100644
index 0000000..2898785
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Factory class for creating instances of
+ * {@link ConfigurationMutationACLPolicy}.
+ */
+public final class ConfigurationMutationACLPolicyFactory {
+
+  private static final Log LOG = LogFactory.getLog(
+      ConfigurationMutationACLPolicyFactory.class);
+
+  private ConfigurationMutationACLPolicyFactory() {
+    // Unused.
+  }
+
+  public static ConfigurationMutationACLPolicy getPolicy(Configuration conf) {
+    Class<? extends ConfigurationMutationACLPolicy> policyClass =
+        conf.getClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+            DefaultConfigurationMutationACLPolicy.class,
+            ConfigurationMutationACLPolicy.class);
+    LOG.info("Using ConfigurationMutationACLPolicy implementation - " +
+        policyClass);
+    return ReflectionUtils.newInstance(policyClass, conf);
+  }
+}
\ No newline at end of file

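Usage follows the pattern MutableCSConfigurationProvider adopts later in this
patch; a sketch (conf, rmContext, user and confUpdate assumed in scope):

    ConfigurationMutationACLPolicy policy =
        ConfigurationMutationACLPolicyFactory.getPolicy(conf);
    policy.init(conf, rmContext);
    if (!policy.isMutationAllowed(user, confUpdate)) {
      throw new AccessControlException("User cannot modify these queues.");
    }
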
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..680c3b8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+/**
+ * Default configuration mutation ACL policy. Checks if user is YARN admin.
+ */
+public class DefaultConfigurationMutationACLPolicy implements
+    ConfigurationMutationACLPolicy {
+
+  private YarnAuthorizationProvider authorizer;
+
+  @Override
+  public void init(Configuration conf, RMContext rmContext) {
+    authorizer = YarnAuthorizationProvider.getInstance(conf);
+  }
+
+  @Override
+  public boolean isMutationAllowed(UserGroupInformation user,
+      QueueConfigsUpdateInfo confUpdate) {
+    return authorizer.isAdmin(user);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
index 35e36e1..93a935e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -17,10 +17,11 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 
 import java.io.IOException;
-import java.util.Map;
 
 /**
  * Interface for a scheduler that supports changing configuration at runtime.
@@ -31,10 +32,22 @@ public interface MutableConfScheduler extends ResourceScheduler {
   /**
    * Update the scheduler's configuration.
    * @param user Caller of this update
-   * @param confUpdate key-value map of the configuration update
+   * @param confUpdate configuration update
    * @throws IOException if update is invalid
    */
   void updateConfiguration(UserGroupInformation user,
-      Map<String, String> confUpdate) throws IOException;
+      QueueConfigsUpdateInfo confUpdate) throws IOException;
 
+  /**
+   * Get the scheduler configuration.
+   * @return the scheduler configuration
+   */
+  Configuration getConfiguration();
+
+  /**
+   * Get queue object based on queue name.
+   * @param queueName the queue name
+   * @return the queue object
+   */
+  Queue getQueue(String queueName);
 }

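A sketch of how a caller such as the REST layer might drive this interface,
checking the scheduler type first since not every scheduler is mutable (rm,
callerUgi and update assumed in scope):

    ResourceScheduler scheduler = rm.getResourceScheduler();
    if (scheduler instanceof MutableConfScheduler) {
      ((MutableConfScheduler) scheduler).updateConfiguration(callerUgi, update);
    } else {
      throw new UnsupportedOperationException(
          "Configured scheduler does not support mutation.");
    }
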
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 889c3bc..f04c128 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
 import java.io.IOException;
-import java.util.Map;
 
 /**
  * Interface for allowing changing scheduler configurations.
@@ -32,7 +34,7 @@ public interface MutableConfigurationProvider {
    * @param confUpdate Key-value pairs for configurations to be updated.
    * @throws IOException if scheduler could not be reinitialized
    */
-  void mutateConfiguration(String user, Map<String, String> confUpdate)
-      throws IOException;
+  void mutateConfiguration(UserGroupInformation user, QueueConfigsUpdateInfo
+      confUpdate) throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index ac1748a..5bcb352 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -137,6 +137,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -642,6 +643,7 @@ public class CapacityScheduler extends
     preemptionManager.refreshQueues(null, this.getRootQueue());
   }
 
+  @Override
   public CSQueue getQueue(String queueName) {
     if (queueName == null) {
       return null;
@@ -2517,10 +2519,10 @@ public class CapacityScheduler extends
 
   @Override
   public void updateConfiguration(UserGroupInformation user,
-      Map<String, String> confUpdate) throws IOException {
+      QueueConfigsUpdateInfo confUpdate) throws IOException {
     if (csConfProvider instanceof MutableConfigurationProvider) {
       ((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
-          user.getShortUserName(), confUpdate);
+          user, confUpdate);
     } else {
       throw new UnsupportedOperationException("Configured CS configuration " +
           "provider does not support updating configuration.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index ea1b3c0..8b879b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -18,14 +18,27 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
 
+import com.google.common.base.Joiner;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicyFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -38,6 +51,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
 
   private Configuration schedConf;
   private YarnConfigurationStore confStore;
+  private ConfigurationMutationACLPolicy aclMutationPolicy;
   private RMContext rmContext;
   private Configuration conf;
 
@@ -68,6 +82,9 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
       schedConf.set(kv.getKey(), kv.getValue());
     }
     confStore.initialize(config, schedConf);
+    this.aclMutationPolicy = ConfigurationMutationACLPolicyFactory
+        .getPolicy(config);
+    aclMutationPolicy.init(config, rmContext);
     this.conf = config;
   }
 
@@ -80,12 +97,17 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
   }
 
   @Override
-  public void mutateConfiguration(String user,
-      Map<String, String> confUpdate) throws IOException {
+  public void mutateConfiguration(UserGroupInformation user,
+      QueueConfigsUpdateInfo confUpdate) throws IOException {
+    if (!aclMutationPolicy.isMutationAllowed(user, confUpdate)) {
+      throw new AccessControlException("User is not admin of all modified" +
+          " queues.");
+    }
     Configuration oldConf = new Configuration(schedConf);
-    LogMutation log = new LogMutation(confUpdate, user);
+    Map<String, String> kvUpdate = constructKeyValueConfUpdate(confUpdate);
+    LogMutation log = new LogMutation(kvUpdate, user.getShortUserName());
     long id = confStore.logMutation(log);
-    for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
+    for (Map.Entry<String, String> kv : kvUpdate.entrySet()) {
       if (kv.getValue() == null) {
         schedConf.unset(kv.getKey());
       } else {
@@ -101,4 +123,125 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
     }
     confStore.confirmMutation(id, true);
   }
+
+
+  private Map<String, String> constructKeyValueConfUpdate(
+      QueueConfigsUpdateInfo mutationInfo) throws IOException {
+    CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+    CapacitySchedulerConfiguration proposedConf =
+        new CapacitySchedulerConfiguration(cs.getConfiguration(), false);
+    Map<String, String> confUpdate = new HashMap<>();
+    for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
+      removeQueue(queueToRemove, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
+      addQueue(addQueueInfo, proposedConf, confUpdate);
+    }
+    for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
+      updateQueue(updateQueueInfo, proposedConf, confUpdate);
+    }
+    return confUpdate;
+  }
+
+  private void removeQueue(
+      String queueToRemove, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (queueToRemove == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+      String queueName = queueToRemove.substring(
+          queueToRemove.lastIndexOf('.') + 1);
+      CSQueue queue = cs.getQueue(queueName);
+      if (queue == null ||
+          !queue.getQueuePath().equals(queueToRemove)) {
+        throw new IOException("Queue " + queueToRemove + " not found");
+      } else if (queueToRemove.lastIndexOf('.') == -1) {
+        throw new IOException("Can't remove queue " + queueToRemove);
+      }
+      String parentQueuePath = queueToRemove.substring(0, queueToRemove
+          .lastIndexOf('.'));
+      String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
+      List<String> newSiblingQueues = new ArrayList<>();
+      for (String siblingQueue : siblingQueues) {
+        if (!siblingQueue.equals(queueName)) {
+          newSiblingQueues.add(siblingQueue);
+        }
+      }
+      proposedConf.setQueues(parentQueuePath, newSiblingQueues
+          .toArray(new String[0]));
+      String queuesConfig = CapacitySchedulerConfiguration.PREFIX
+          + parentQueuePath + CapacitySchedulerConfiguration.DOT
+          + CapacitySchedulerConfiguration.QUEUES;
+      if (newSiblingQueues.size() == 0) {
+        confUpdate.put(queuesConfig, null);
+      } else {
+        confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
+      }
+      for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
+          ".*" + queueToRemove.replaceAll("\\.", "\\.") + "\\..*")
+          .entrySet()) {
+        proposedConf.unset(confRemove.getKey());
+        confUpdate.put(confRemove.getKey(), null);
+      }
+    }
+  }
+
+  private void addQueue(
+      QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) throws IOException {
+    if (addInfo == null) {
+      return;
+    } else {
+      CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+      String queuePath = addInfo.getQueue();
+      String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
+      if (cs.getQueue(queueName) != null) {
+        throw new IOException("Can't add existing queue " + queuePath);
+      } else if (queuePath.lastIndexOf('.') == -1) {
+        throw new IOException("Can't add invalid queue " + queuePath);
+      }
+      String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
+      String[] siblings = proposedConf.getQueues(parentQueue);
+      List<String> siblingQueues = siblings == null ? new ArrayList<>() :
+          new ArrayList<>(Arrays.<String>asList(siblings));
+      siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
+      proposedConf.setQueues(parentQueue,
+          siblingQueues.toArray(new String[0]));
+      confUpdate.put(CapacitySchedulerConfiguration.PREFIX
+              + parentQueue + CapacitySchedulerConfiguration.DOT
+              + CapacitySchedulerConfiguration.QUEUES,
+          Joiner.on(',').join(siblingQueues));
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX
+          + queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
+
+  private void updateQueue(QueueConfigInfo updateInfo,
+      CapacitySchedulerConfiguration proposedConf,
+      Map<String, String> confUpdate) {
+    if (updateInfo == null) {
+      return;
+    } else {
+      String queuePath = updateInfo.getQueue();
+      String keyPrefix = CapacitySchedulerConfiguration.PREFIX
+          + queuePath + CapacitySchedulerConfiguration.DOT;
+      for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
+        if (kv.getValue() == null) {
+          proposedConf.unset(keyPrefix + kv.getKey());
+        } else {
+          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+        }
+        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+      }
+    }
+  }
 }
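
For orientation, the provider flattens a structured QueueConfigsUpdateInfo into
plain capacity-scheduler keys. A hedged, stand-alone sketch of the entries
produced when a hypothetical queue root.a.a2 with capacity=50 is added (the
yarn.scheduler.capacity prefix matches CapacitySchedulerConfiguration.PREFIX;
queue names and values here are illustrative only):

    import java.util.HashMap;
    import java.util.Map;

    // Mimics the string construction in addQueue() above; not Hadoop code.
    public class ConfUpdateSketch {
      private static final String PREFIX = "yarn.scheduler.capacity.";

      public static void main(String[] args) {
        String queuePath = "root.a.a2";   // hypothetical queue being added
        String parent = queuePath.substring(0, queuePath.lastIndexOf('.'));
        Map<String, String> confUpdate = new HashMap<>();
        // The parent's child list gains the new leaf name.
        confUpdate.put(PREFIX + parent + ".queues", "a1,a2");
        // Per-queue parameters are keyed by the full queue path.
        confUpdate.put(PREFIX + queuePath + ".capacity", "50");
        confUpdate.forEach((k, v) -> System.out.println(k + " = " + v));
        // Prints (in some order):
        // yarn.scheduler.capacity.root.a.queues = a1,a2
        // yarn.scheduler.capacity.root.a.a2.capacity = 50
      }
    }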

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..1f94c1c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * A configuration mutation ACL policy which checks that the user has admin
+ * privileges on all queues they are changing.
+ */
+public class QueueAdminConfigurationMutationACLPolicy implements
+    ConfigurationMutationACLPolicy {
+
+  private RMContext rmContext;
+
+  @Override
+  public void init(Configuration conf, RMContext context) {
+    this.rmContext = context;
+  }
+
+  @Override
+  public boolean isMutationAllowed(UserGroupInformation user,
+      QueueConfigsUpdateInfo confUpdate) {
+    Set<String> queues = new HashSet<>();
+    for (QueueConfigInfo addQueueInfo : confUpdate.getAddQueueInfo()) {
+      queues.add(addQueueInfo.getQueue());
+    }
+    for (String removeQueue : confUpdate.getRemoveQueueInfo()) {
+      queues.add(removeQueue);
+    }
+    for (QueueConfigInfo updateQueueInfo : confUpdate.getUpdateQueueInfo()) {
+      queues.add(updateQueueInfo.getQueue());
+    }
+    for (String queuePath : queues) {
+      String queueName = queuePath.lastIndexOf('.') != -1 ?
+          queuePath.substring(queuePath.lastIndexOf('.') + 1) : queuePath;
+      QueueInfo queueInfo = null;
+      try {
+        queueInfo = rmContext.getScheduler()
+            .getQueueInfo(queueName, false, false);
+      } catch (IOException e) {
+        // Queue is not found, do nothing.
+      }
+      String parentPath = queuePath;
+      // TODO: handle global config change.
+      while (queueInfo == null) {
+        // We are adding a queue (whose parent we are possibly also adding).
+        // Check ACL of lowest parent queue which already exists.
+        parentPath = parentPath.substring(0, parentPath.lastIndexOf('.'));
+        String parentName = parentPath.lastIndexOf('.') != -1 ?
+            parentPath.substring(parentPath.lastIndexOf('.') + 1) : parentPath;
+        try {
+          queueInfo = rmContext.getScheduler()
+              .getQueueInfo(parentName, false, false);
+        } catch (IOException e) {
+          // Queue is not found, do nothing.
+        }
+      }
+      Queue queue = ((MutableConfScheduler) rmContext.getScheduler())
+          .getQueue(queueInfo.getQueueName());
+      if (queue != null && !queue.hasAccess(QueueACL.ADMINISTER_QUEUE, user)) {
+        return false;
+      }
+    }
+    return true;
+  }
+}
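
The heart of the policy is the climb to the lowest existing ancestor when a
not-yet-created queue is referenced. A minimal stand-alone sketch of that
loop, with the scheduler lookup stubbed by a set (names hypothetical; like
the loop above, it assumes the root queue always exists):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class AncestorWalkSketch {
      // Stand-in for "the scheduler knows this queue".
      private static final Set<String> EXISTING =
          new HashSet<>(Arrays.asList("root", "b", "b1"));

      static String lowestExistingAncestor(String queuePath) {
        String path = queuePath;
        String name = path.substring(path.lastIndexOf('.') + 1);
        while (!EXISTING.contains(name)) {
          // Not found: strip the last path component and retry,
          // mirroring the while (queueInfo == null) loop above.
          path = path.substring(0, path.lastIndexOf('.'));
          name = path.substring(path.lastIndexOf('.') + 1);
        }
        return name;
      }

      public static void main(String[] args) {
        System.out.println(lowestExistingAncestor("root.b.b2.b21")); // b
        System.out.println(lowestExistingAncestor("root.b.b1"));     // b1
      }
    }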

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 56a0bf8..d670748 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -135,7 +135,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
@@ -2434,10 +2433,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
         callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
           @Override
           public Void run() throws IOException, YarnException {
-            Map<String, String> confUpdate =
-                constructKeyValueConfUpdate(mutationInfo);
-            ((CapacityScheduler) scheduler).updateConfiguration(callerUGI,
-                confUpdate);
+            ((MutableConfScheduler) scheduler).updateConfiguration(callerUGI,
+                mutationInfo);
             return null;
           }
         });
@@ -2449,129 +2446,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
           "successfully applied.").build();
     } else {
       return Response.status(Status.BAD_REQUEST)
-          .entity("Configuration change only supported by CapacityScheduler.")
+          .entity("Configuration change only supported by " +
+              "MutableConfScheduler.")
           .build();
     }
   }
-
-  private Map<String, String> constructKeyValueConfUpdate(
-      QueueConfigsUpdateInfo mutationInfo) throws IOException {
-    CapacitySchedulerConfiguration currentConf =
-        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
-    CapacitySchedulerConfiguration proposedConf =
-        new CapacitySchedulerConfiguration(currentConf, false);
-    Map<String, String> confUpdate = new HashMap<>();
-    for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
-      removeQueue(queueToRemove, proposedConf, confUpdate);
-    }
-    for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
-      addQueue(addQueueInfo, proposedConf, confUpdate);
-    }
-    for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
-      updateQueue(updateQueueInfo, proposedConf, confUpdate);
-    }
-    return confUpdate;
-  }
-
-  private void removeQueue(
-      String queueToRemove, CapacitySchedulerConfiguration proposedConf,
-      Map<String, String> confUpdate) throws IOException {
-    if (queueToRemove == null) {
-      return;
-    } else {
-      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-      String queueName = queueToRemove.substring(
-          queueToRemove.lastIndexOf('.') + 1);
-      CSQueue queue = cs.getQueue(queueName);
-      if (queue == null ||
-          !queue.getQueuePath().equals(queueToRemove)) {
-        throw new IOException("Queue " + queueToRemove + " not found");
-      } else if (queueToRemove.lastIndexOf('.') == -1) {
-        throw new IOException("Can't remove queue " + queueToRemove);
-      }
-      String parentQueuePath = queueToRemove.substring(0, queueToRemove
-          .lastIndexOf('.'));
-      String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
-      List<String> newSiblingQueues = new ArrayList<>();
-      for (String siblingQueue : siblingQueues) {
-        if (!siblingQueue.equals(queueName)) {
-          newSiblingQueues.add(siblingQueue);
-        }
-      }
-      proposedConf.setQueues(parentQueuePath, newSiblingQueues
-          .toArray(new String[0]));
-      String queuesConfig = CapacitySchedulerConfiguration.PREFIX +
-          parentQueuePath + CapacitySchedulerConfiguration.DOT +
-          CapacitySchedulerConfiguration.QUEUES;
-      if (newSiblingQueues.size() == 0) {
-        confUpdate.put(queuesConfig, null);
-      } else {
-        confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
-      }
-      for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
-          ".*" + queueToRemove.replaceAll("\\.", "\\.") + "\\..*")
-          .entrySet()) {
-        proposedConf.unset(confRemove.getKey());
-        confUpdate.put(confRemove.getKey(), null);
-      }
-    }
-  }
-
-  private void addQueue(
-      QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
-      Map<String, String> confUpdate) throws IOException {
-    if (addInfo == null) {
-      return;
-    } else {
-      CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-      String queuePath = addInfo.getQueue();
-      String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
-      if (cs.getQueue(queueName) != null) {
-        throw new IOException("Can't add existing queue " + queuePath);
-      } else if (queuePath.lastIndexOf('.') == -1) {
-        throw new IOException("Can't add invalid queue " + queuePath);
-      }
-      String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
-      String[] siblings = proposedConf.getQueues(parentQueue);
-      List<String> siblingQueues = siblings == null ? new ArrayList<>() :
-          new ArrayList<>(Arrays.<String>asList(siblings));
-      siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
-      proposedConf.setQueues(parentQueue,
-          siblingQueues.toArray(new String[0]));
-      confUpdate.put(CapacitySchedulerConfiguration.PREFIX +
-          parentQueue + CapacitySchedulerConfiguration.DOT +
-          CapacitySchedulerConfiguration.QUEUES,
-          Joiner.on(',').join(siblingQueues));
-      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
-          queuePath + CapacitySchedulerConfiguration.DOT;
-      for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
-        if (kv.getValue() == null) {
-          proposedConf.unset(keyPrefix + kv.getKey());
-        } else {
-          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
-        }
-        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
-      }
-    }
-  }
-
-  private void updateQueue(QueueConfigInfo updateInfo,
-      CapacitySchedulerConfiguration proposedConf,
-      Map<String, String> confUpdate) {
-    if (updateInfo == null) {
-      return;
-    } else {
-      String queuePath = updateInfo.getQueue();
-      String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
-          queuePath + CapacitySchedulerConfiguration.DOT;
-      for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
-        if (kv.getValue() == null) {
-          proposedConf.unset(keyPrefix + kv.getKey());
-        } else {
-          proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
-        }
-        confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
-      }
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
new file mode 100644
index 0000000..4016dcf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestConfigurationMutationACLPolicies {
+
+  private ConfigurationMutationACLPolicy policy;
+  private RMContext rmContext;
+  private MutableConfScheduler scheduler;
+
+  private static final UserGroupInformation GOOD_USER = UserGroupInformation
+      .createUserForTesting("goodUser", new String[] {});
+  private static final UserGroupInformation BAD_USER = UserGroupInformation
+      .createUserForTesting("badUser", new String[] {});
+  private static final Map<String, String> EMPTY_MAP =
+      Collections.<String, String>emptyMap();
+
+  @Before
+  public void setUp() throws IOException {
+    rmContext = mock(RMContext.class);
+    scheduler = mock(MutableConfScheduler.class);
+    when(rmContext.getScheduler()).thenReturn(scheduler);
+    mockQueue("a", scheduler);
+    mockQueue("b", scheduler);
+    mockQueue("b1", scheduler);
+  }
+
+  private void mockQueue(String queueName, MutableConfScheduler scheduler)
+      throws IOException {
+    QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null,
+        null, null, null, null, false);
+    when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean()))
+        .thenReturn(queueInfo);
+    Queue queue = mock(Queue.class);
+    when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(GOOD_USER)))
+        .thenReturn(true);
+    when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(BAD_USER)))
+        .thenReturn(false);
+    when(scheduler.getQueue(eq(queueName))).thenReturn(queue);
+  }
+
+  @Test
+  public void testDefaultPolicy() {
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.YARN_ADMIN_ACL, GOOD_USER.getShortUserName());
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        DefaultConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, null));
+    assertFalse(policy.isMutationAllowed(BAD_USER, null));
+  }
+
+  @Test
+  public void testQueueAdminBasedPolicy() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.a", EMPTY_MAP);
+    updateInfo.getUpdateQueueInfo().add(configInfo);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+
+  @Test
+  public void testQueueAdminPolicyAddQueue() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    // Add root.b.b2. Should check ACL of its parent, root.b.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2", EMPTY_MAP);
+    updateInfo.getAddQueueInfo().add(configInfo);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+
+  @Test
+  public void testQueueAdminPolicyAddNestedQueue() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    // Add root.b.b2.b21. The lowest existing ancestor is root.b, so its
+    // ACL should be checked.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2.b21", EMPTY_MAP);
+    updateInfo.getAddQueueInfo().add(configInfo);
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+
+  @Test
+  public void testQueueAdminPolicyRemoveQueue() {
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    // Remove root.b.b1.
+    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    updateInfo.getRemoveQueueInfo().add("root.b.b1");
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa3456/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 254da31..13229b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -19,8 +19,12 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -43,22 +47,34 @@ public class TestMutableCSConfigurationProvider {
 
   private MutableCSConfigurationProvider confProvider;
   private RMContext rmContext;
-  private Map<String, String> goodUpdate;
-  private Map<String, String> badUpdate;
+  private QueueConfigsUpdateInfo goodUpdate;
+  private QueueConfigsUpdateInfo badUpdate;
   private CapacityScheduler cs;
 
-  private static final String TEST_USER = "testUser";
+  private static final UserGroupInformation TEST_USER = UserGroupInformation
+      .createUserForTesting("testUser", new String[] {});
 
   @Before
   public void setUp() {
     cs = mock(CapacityScheduler.class);
     rmContext = mock(RMContext.class);
     when(rmContext.getScheduler()).thenReturn(cs);
+    when(cs.getConfiguration()).thenReturn(
+        new CapacitySchedulerConfiguration());
     confProvider = new MutableCSConfigurationProvider(rmContext);
-    goodUpdate = new HashMap<>();
-    goodUpdate.put("goodKey", "goodVal");
-    badUpdate = new HashMap<>();
-    badUpdate.put("badKey", "badVal");
+    goodUpdate = new QueueConfigsUpdateInfo();
+    Map<String, String> goodUpdateMap = new HashMap<>();
+    goodUpdateMap.put("goodKey", "goodVal");
+    QueueConfigInfo goodUpdateInfo =
+        new QueueConfigInfo("root.a", goodUpdateMap);
+    goodUpdate.getUpdateQueueInfo().add(goodUpdateInfo);
+
+    badUpdate = new QueueConfigsUpdateInfo();
+    Map<String, String> badUpdateMap = new HashMap<>();
+    badUpdateMap.put("badKey", "badVal");
+    QueueConfigInfo badUpdateInfo =
+        new QueueConfigInfo("root.a", badUpdateMap);
+    badUpdate.getUpdateQueueInfo().add(badUpdateInfo);
   }
 
   @Test
@@ -66,15 +82,16 @@ public class TestMutableCSConfigurationProvider {
     Configuration conf = new Configuration();
     confProvider.init(conf);
     assertNull(confProvider.loadConfiguration(conf)
-        .get("goodKey"));
+        .get("yarn.scheduler.capacity.root.a.goodKey"));
 
     doNothing().when(cs).reinitialize(any(Configuration.class),
         any(RMContext.class));
     confProvider.mutateConfiguration(TEST_USER, goodUpdate);
     assertEquals("goodVal", confProvider.loadConfiguration(conf)
-        .get("goodKey"));
+        .get("yarn.scheduler.capacity.root.a.goodKey"));
 
-    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+    assertNull(confProvider.loadConfiguration(conf).get(
+        "yarn.scheduler.capacity.root.a.badKey"));
     doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
         any(RMContext.class));
     try {
@@ -82,6 +99,7 @@ public class TestMutableCSConfigurationProvider {
     } catch (IOException e) {
       // Expected exception.
     }
-    assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+    assertNull(confProvider.loadConfiguration(conf).get(
+        "yarn.scheduler.capacity.root.a.badKey"));
   }
 }




[07/50] [abbrv] hadoop git commit: HADOOP-14681. Remove MockitoMaker class. Contributed by Andras Bokor.

Posted by xg...@apache.org.
HADOOP-14681. Remove MockitoMaker class. Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cca51e91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cca51e91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cca51e91

Branch: refs/heads/YARN-5734
Commit: cca51e916b7387ea358688e8f8188ead948fbdcc
Parents: 218b1b3
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 25 15:24:56 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 25 15:24:56 2017 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/test/MockitoMaker.java    | 132 -------------------
 .../org/apache/hadoop/util/TestDiskChecker.java |  12 +-
 .../v2/app/metrics/TestMRAppMetrics.java        |   9 +-
 .../lib/input/TestMRCJCFileInputFormat.java     |  11 +-
 .../hadoop/mapred/TestShuffleHandler.java       |  32 ++---
 .../scheduler/TestQueueMetrics.java             |  14 +-
 6 files changed, 38 insertions(+), 172 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cca51e91/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MockitoMaker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MockitoMaker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MockitoMaker.java
deleted file mode 100644
index 28c2011..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MockitoMaker.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test;
-
-import static org.mockito.Mockito.*;
-
-/**
- * Helper class to create one-liner stubs, so that instead of: <pre>
- * SomeType someDescriptiveMock = mock(SomeType.class);
- * when(someDescriptiveMock.someMethod()).thenReturn(someValue);</pre>
- * <p>You can now do: <pre>
- * SomeType someDescriptiveMock = make(stub(SomeType.class)
- *     .returning(someValue).from.someMethod());</pre>
- */
-public class MockitoMaker {
-
-  /**
-   * Create a mock object from a mocked method call.
-   *
-   * @param <T> type of mocked object
-   * @param methodCall  for mocked object
-   * @return mocked object
-   */
-  @SuppressWarnings("unchecked")
-  public static <T> T make(Object methodCall) {
-    StubBuilder<T> sb = StubBuilder.current();
-    when(methodCall).thenReturn(sb.firstReturn, sb.laterReturns);
-    return (T) StubBuilder.current().from;
-  }
-
-  /**
-   * Create a stub builder of a mocked object.
-   *
-   * @param <T>     type of the target object to be mocked
-   * @param target  class of the target object to be mocked
-   * @return the stub builder of the mocked object
-   */
-  public static <T> StubBuilder<T> stub(Class<T> target) {
-    return new StubBuilder<T>(mock(target));
-  }
-
-  /**
-   * Builder class for stubs
-   * @param <T> type of the object to be mocked
-   */
-  public static class StubBuilder<T> {
-
-    /**
-     * The target mock object
-     */
-    public final T from;
-
-    // We want to be able to use this even when the tests are run in parallel.
-    @SuppressWarnings("rawtypes")
-    private static final ThreadLocal<StubBuilder> tls =
-        new ThreadLocal<StubBuilder>() {
-          @Override protected StubBuilder initialValue() {
-            return new StubBuilder();
-          }
-        };
-
-    private Object firstReturn = null;
-    private Object[] laterReturns = {};
-
-    /**
-     * Default constructor for the initial stub builder
-     */
-    public StubBuilder() {
-      this.from = null;
-    }
-
-    /**
-     * Construct a stub builder with a mock instance
-     *
-     * @param mockInstance  the mock object
-     */
-    public StubBuilder(T mockInstance) {
-      tls.set(this);
-      this.from = mockInstance;
-    }
-
-    /**
-     * Get the current stub builder from thread local
-     *
-     * @param <T>
-     * @return the stub builder of the mocked object
-     */
-    @SuppressWarnings("unchecked")
-    public static <T> StubBuilder<T> current() {
-      return tls.get();
-    }
-
-    /**
-     * Set the return value for the current stub builder
-     *
-     * @param value the return value
-     * @return the stub builder
-     */
-    public StubBuilder<T> returning(Object value) {
-      this.firstReturn = value;
-      return this;
-    }
-
-    /**
-     * Set the return values for the current stub builder
-     *
-     * @param value   the first return value
-     * @param values  the return values for later invocations
-     * @return the stub builder
-     */
-    public StubBuilder<T> returning(Object value, Object... values) {
-      this.firstReturn = value;
-      this.laterReturns = values;
-      return this;
-    }
-  }
-}
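
For readers skimming the follow-on diffs, the migration is mechanical: each
make(stub(...)) one-liner becomes a mock() plus an explicit when() stub. A
minimal sketch with a placeholder type:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class MigrationSketch {
      interface SomeType {          // placeholder, not a Hadoop type
        String someMethod();
      }

      public static void main(String[] args) {
        // Before: make(stub(SomeType.class)
        //     .returning("someValue").from.someMethod());
        // After, as applied throughout this commit:
        SomeType someDescriptiveMock = mock(SomeType.class);
        when(someDescriptiveMock.someMethod()).thenReturn("someValue");
        System.out.println(someDescriptiveMock.someMethod()); // someValue
      }
    }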

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cca51e91/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index 40b8d0d..bd8e1dd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -29,7 +29,6 @@ import static org.junit.Assert.*;
 
 import static org.mockito.Mockito.*;
 
-import static org.apache.hadoop.test.MockitoMaker.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -83,13 +82,14 @@ public class TestDiskChecker {
 
   private void _mkdirs(boolean exists, FsPermission before, FsPermission after)
       throws Throwable {
-    File localDir = make(stub(File.class).returning(exists).from.exists());
+    File localDir = mock(File.class);
+    when(localDir.exists()).thenReturn(exists);
     when(localDir.mkdir()).thenReturn(true);
     Path dir = mock(Path.class); // use default stubs
-    LocalFileSystem fs = make(stub(LocalFileSystem.class)
-        .returning(localDir).from.pathToFile(dir));
-    FileStatus stat = make(stub(FileStatus.class)
-        .returning(after).from.getPermission());
+    LocalFileSystem fs = mock(LocalFileSystem.class);
+    when(fs.pathToFile(dir)).thenReturn(localDir);
+    FileStatus stat = mock(FileStatus.class);
+    when(stat.getPermission()).thenReturn(after);
     when(fs.getFileStatus(dir)).thenReturn(stat);
 
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cca51e91/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
index 8bfc2a8..02552bc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
 import static org.apache.hadoop.test.MetricsAsserts.*;
-import static org.apache.hadoop.test.MockitoMaker.*;
 
 import org.junit.Test;
 
@@ -33,10 +32,10 @@ public class TestMRAppMetrics {
 
   @Test public void testNames() {
     Job job = mock(Job.class);
-    Task mapTask = make(stub(Task.class).returning(TaskType.MAP).
-                        from.getType());
-    Task reduceTask = make(stub(Task.class).returning(TaskType.REDUCE).
-                           from.getType());
+    Task mapTask = mock(Task.class);
+    when(mapTask.getType()).thenReturn(TaskType.MAP);
+    Task reduceTask = mock(Task.class);
+    when(reduceTask.getType()).thenReturn(TaskType.REDUCE);
     MRAppMetrics metrics = MRAppMetrics.create();
 
     metrics.submittedJob(job);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cca51e91/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRCJCFileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRCJCFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRCJCFileInputFormat.java
index b806630..ef25876 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRCJCFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMRCJCFileInputFormat.java
@@ -27,7 +27,6 @@ import org.junit.Test;
 import static org.junit.Assert.*;
 
 import static org.mockito.Mockito.*;
-import static org.apache.hadoop.test.MockitoMaker.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -80,12 +79,14 @@ public class TestMRCJCFileInputFormat {
   @Test
   public void testNumInputFiles() throws Exception {
     Configuration conf = spy(new Configuration());
-    Job job = make(stub(Job.class).returning(conf).from.getConfiguration());
-    FileStatus stat = make(stub(FileStatus.class).returning(0L).from.getLen());
+    Job mockedJob = mock(Job.class);
+    when(mockedJob.getConfiguration()).thenReturn(conf);
+    FileStatus stat = mock(FileStatus.class);
+    when(stat.getLen()).thenReturn(0L);
     TextInputFormat ispy = spy(new TextInputFormat());
-    doReturn(Arrays.asList(stat)).when(ispy).listStatus(job);
+    doReturn(Arrays.asList(stat)).when(ispy).listStatus(mockedJob);
 
-    ispy.getSplits(job);
+    ispy.getSplits(mockedJob);
     verify(conf).setLong(FileInputFormat.NUM_INPUT_FILES, 1);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cca51e91/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
index 7fb2051..849ce1a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
@@ -20,14 +20,14 @@ package org.apache.hadoop.mapred;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.apache.hadoop.test.MockitoMaker.make;
-import static org.apache.hadoop.test.MockitoMaker.stub;
 import static org.junit.Assert.assertTrue;
 import static org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer;
 import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
 import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assume.assumeTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.DataInputStream;
 import java.io.EOFException;
@@ -188,8 +188,8 @@ public class TestShuffleHandler {
   public void testShuffleMetrics() throws Exception {
     MetricsSystem ms = new MetricsSystemImpl();
     ShuffleHandler sh = new ShuffleHandler(ms);
-    ChannelFuture cf = make(stub(ChannelFuture.class).
-        returning(true, false).from.isSuccess());
+    ChannelFuture cf = mock(ChannelFuture.class);
+    when(cf.isSuccess()).thenReturn(true).thenReturn(false);
 
     sh.metrics.shuffleConnections.incr();
     sh.metrics.shuffleOutputBytes.incr(1*MiB);
@@ -1080,10 +1080,10 @@ public class TestShuffleHandler {
         new ArrayList<ShuffleHandler.ReduceMapFileCount>();
 
     final ChannelHandlerContext mockCtx =
-        Mockito.mock(ChannelHandlerContext.class);
-    final MessageEvent mockEvt = Mockito.mock(MessageEvent.class);
-    final Channel mockCh = Mockito.mock(AbstractChannel.class);
-    final ChannelPipeline mockPipeline = Mockito.mock(ChannelPipeline.class);
+        mock(ChannelHandlerContext.class);
+    final MessageEvent mockEvt = mock(MessageEvent.class);
+    final Channel mockCh = mock(AbstractChannel.class);
+    final ChannelPipeline mockPipeline = mock(ChannelPipeline.class);
 
     // Mock HttpRequest and ChannelFuture
     final HttpRequest mockHttpRequest = createMockHttpRequest();
@@ -1094,16 +1094,16 @@ public class TestShuffleHandler {
 
     // Mock Netty Channel Context and Channel behavior
     Mockito.doReturn(mockCh).when(mockCtx).getChannel();
-    Mockito.when(mockCh.getPipeline()).thenReturn(mockPipeline);
-    Mockito.when(mockPipeline.get(
+    when(mockCh.getPipeline()).thenReturn(mockPipeline);
+    when(mockPipeline.get(
         Mockito.any(String.class))).thenReturn(timerHandler);
-    Mockito.when(mockCtx.getChannel()).thenReturn(mockCh);
+    when(mockCtx.getChannel()).thenReturn(mockCh);
     Mockito.doReturn(mockFuture).when(mockCh).write(Mockito.any(Object.class));
-    Mockito.when(mockCh.write(Object.class)).thenReturn(mockFuture);
+    when(mockCh.write(Object.class)).thenReturn(mockFuture);
 
     //Mock MessageEvent behavior
     Mockito.doReturn(mockCh).when(mockEvt).getChannel();
-    Mockito.when(mockEvt.getChannel()).thenReturn(mockCh);
+    when(mockEvt.getChannel()).thenReturn(mockCh);
     Mockito.doReturn(mockHttpRequest).when(mockEvt).getMessage();
 
     final ShuffleHandler sh = new MockShuffleHandler();
@@ -1127,8 +1127,8 @@ public class TestShuffleHandler {
 
   public ChannelFuture createMockChannelFuture(Channel mockCh,
       final List<ShuffleHandler.ReduceMapFileCount> listenerList) {
-    final ChannelFuture mockFuture = Mockito.mock(ChannelFuture.class);
-    Mockito.when(mockFuture.getChannel()).thenReturn(mockCh);
+    final ChannelFuture mockFuture = mock(ChannelFuture.class);
+    when(mockFuture.getChannel()).thenReturn(mockCh);
     Mockito.doReturn(true).when(mockFuture).isSuccess();
     Mockito.doAnswer(new Answer() {
       @Override
@@ -1146,7 +1146,7 @@ public class TestShuffleHandler {
   }
 
   public HttpRequest createMockHttpRequest() {
-    HttpRequest mockHttpRequest = Mockito.mock(HttpRequest.class);
+    HttpRequest mockHttpRequest = mock(HttpRequest.class);
     Mockito.doReturn(HttpMethod.GET).when(mockHttpRequest).getMethod();
     Mockito.doAnswer(new Answer() {
       @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cca51e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
index 13144e9..196d4c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.apache.hadoop.test.MockitoMaker.make;
-import static org.apache.hadoop.test.MockitoMaker.stub;
 import static org.junit.Assert.assertNull;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -226,8 +224,8 @@ public class TestQueueMetrics {
 
     QueueMetrics parentMetrics =
       QueueMetrics.forQueue(ms, parentQueueName, null, true, conf);
-    Queue parentQueue = make(stub(Queue.class).returning(parentMetrics).
-        from.getMetrics());
+    Queue parentQueue = mock(Queue.class);
+    when(parentQueue.getMetrics()).thenReturn(parentMetrics);
     QueueMetrics metrics =
       QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, conf);
     MetricsSource parentQueueSource = queueSource(ms, parentQueueName);
@@ -272,8 +270,8 @@ public class TestQueueMetrics {
 
     QueueMetrics parentMetrics =
       QueueMetrics.forQueue(ms, parentQueueName, null, true, conf);
-    Queue parentQueue = make(stub(Queue.class).returning(parentMetrics).
-        from.getMetrics());
+    Queue parentQueue = mock(Queue.class);
+    when(parentQueue.getMetrics()).thenReturn(parentMetrics);
     QueueMetrics metrics =
       QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, conf);
     MetricsSource parentQueueSource = queueSource(ms, parentQueueName);
@@ -359,8 +357,8 @@ public class TestQueueMetrics {
 
       QueueMetrics p1Metrics =
           QueueMetrics.forQueue(ms, p1, null, true, conf);
-      Queue parentQueue1 = make(stub(Queue.class).returning(p1Metrics).
-          from.getMetrics());
+      Queue parentQueue1 = mock(Queue.class);
+      when(parentQueue1.getMetrics()).thenReturn(p1Metrics);
       QueueMetrics metrics =
           QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);
 




[16/50] [abbrv] hadoop git commit: Addendum for YARN-5548. Use MockRMMemoryStateStore to reduce test failures (Bibin A Chundatt via Varun Saxena)

Posted by xg...@apache.org.
Addendum for YARN-5548. Use MockRMMemoryStateStore to reduce test failures (Bibin A Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11ece0bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11ece0bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11ece0bd

Branch: refs/heads/YARN-5734
Commit: 11ece0bda1f6e5dd9d0f828b7c29acacf6087baa
Parents: f66fd11
Author: Varun Saxena <va...@apache.org>
Authored: Fri Jul 28 00:10:45 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Fri Jul 28 00:10:45 2017 +0530

----------------------------------------------------------------------
 .../resourcemanager/MockRMMemoryStateStore.java | 32 ++++++++++++++++++++
 1 file changed, 32 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11ece0bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMMemoryStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMMemoryStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMMemoryStateStore.java
new file mode 100644
index 0000000..d88ee1e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMMemoryStateStore.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
+
+/**
+ * Test helper for MemoryRMStateStore that exposes the RM state store
+ * event handler.
+ */
+public class MockRMMemoryStateStore extends MemoryRMStateStore {
+  @SuppressWarnings("rawtypes")
+  @Override
+  protected EventHandler getRMStateStoreEventHandler() {
+    return rmStateStoreEventHandler;
+  }
+}
\ No newline at end of file
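
A hedged usage sketch: tests hand the store to a MockRM so recovery events
flow through the exposed handler. The MockRM(Configuration, RMStateStore)
constructor is assumed from existing recovery tests, and the assertions are
elided:

    // Lives in org.apache.hadoop.yarn.server.resourcemanager (test sources),
    // so MockRM and MockRMMemoryStateStore need no imports here.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class RecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
        MockRMMemoryStateStore memStore = new MockRMMemoryStateStore();
        memStore.init(conf);
        MockRM rm = new MockRM(conf, memStore);  // assumed test constructor
        rm.start();
        // ... submit apps, restart a second MockRM on the same store,
        // and assert the recovered state.
        rm.stop();
      }
    }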




[36/50] [abbrv] hadoop git commit: Revert "MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes counter limits are not reset correctly. Contributed by Zhihai Xu."

Posted by xg...@apache.org.
Revert "MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes counter limits are not reset correctly. Contributed by Zhihai Xu."

This reverts commit 433542904aba5ddebf9bd9d299378647351eb13a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e15f9282
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e15f9282
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e15f9282

Branch: refs/heads/YARN-5734
Commit: e15f92829558cc4a30b10f2fccfd17c2f8088003
Parents: 746189a
Author: Junping Du <ju...@apache.org>
Authored: Fri Jul 28 14:06:59 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Fri Jul 28 14:06:59 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e15f9282/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index e5db2e5..25c0630 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -94,7 +94,7 @@ public class HistoryViewer {
       final Configuration jobConf = new Configuration(conf);
       try {
         jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
-        Limits.reset(jobConf);
+        Limits.reset(conf);
       } catch (FileNotFoundException fnf) {
         if (LOG.isWarnEnabled()) {
           LOG.warn("Missing job conf in history", fnf);




[40/50] [abbrv] hadoop git commit: YARN-5728. TestMiniYarnClusterNodeUtilization.testUpdateNodeUtilization timeout.

Posted by xg...@apache.org.
YARN-5728. TestMiniYarnClusterNodeUtilization.testUpdateNodeUtilization timeout.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8bed5e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8bed5e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8bed5e9

Branch: refs/heads/YARN-5734
Commit: f8bed5e9a7d1ece127fb3da123bbfc26ada0016f
Parents: 890e14c
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jul 31 11:09:13 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jul 31 11:09:13 2017 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/server/MiniYARNCluster.java | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8bed5e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 329d57e..de282fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.Collection;
 import java.util.Map;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.service.AbstractService;
@@ -446,7 +448,16 @@ public class MiniYARNCluster extends CompositeService {
 
   public static String getHostname() {
     try {
-      return InetAddress.getLocalHost().getHostName();
+      String hostname = InetAddress.getLocalHost().getHostName();
+      // Create InetSocketAddress to see whether it is resolved or not.
+      // If not, just return "localhost".
+      InetSocketAddress addr =
+          NetUtils.createSocketAddrForHost(hostname, 1);
+      if (addr.isUnresolved()) {
+        return "localhost";
+      } else {
+        return hostname;
+      }
     }
     catch (UnknownHostException ex) {
       throw new RuntimeException(ex);
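
The added check guards against hosts whose self-reported name does not resolve; such a name makes the mini cluster's RPC endpoints unreachable and leaves the utilization test hanging until it times out. A dependency-free sketch of the same probe, using plain java.net instead of Hadoop's NetUtils.createSocketAddrForHost and a hypothetical class name:

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;

public class HostnameFallbackSketch {
  static String hostnameOrLocalhost() {
    try {
      String hostname = InetAddress.getLocalHost().getHostName();
      // Constructing an InetSocketAddress triggers a resolution attempt;
      // isUnresolved() reports whether that attempt failed.
      InetSocketAddress addr = new InetSocketAddress(hostname, 1);
      return addr.isUnresolved() ? "localhost" : hostname;
    } catch (UnknownHostException ex) {
      throw new RuntimeException(ex);
    }
  }

  public static void main(String[] args) {
    System.out.println(hostnameOrLocalhost());
  }
}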




[28/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38c6fa5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38c6fa5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38c6fa5c

Branch: refs/heads/YARN-5734
Commit: 38c6fa5c7a61c7f6d4d2db5f12f9c60d477fb397
Parents: c6330f2
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jul 28 08:57:34 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Jul 28 09:10:52 2017 +0900

----------------------------------------------------------------------
 .../mapreduce/v2/app/webapp/AppController.java  |    10 +-
 .../hadoop/mapreduce/v2/app/webapp/AppView.java |     4 +-
 .../mapreduce/v2/app/webapp/ConfBlock.java      |    36 +-
 .../mapreduce/v2/app/webapp/CountersBlock.java  |    38 +-
 .../mapreduce/v2/app/webapp/CountersPage.java   |     4 +-
 .../mapreduce/v2/app/webapp/InfoPage.java       |     2 +-
 .../mapreduce/v2/app/webapp/JobBlock.java       |    95 +-
 .../mapreduce/v2/app/webapp/JobConfPage.java    |     3 +-
 .../hadoop/mapreduce/v2/app/webapp/JobPage.java |     2 +-
 .../mapreduce/v2/app/webapp/JobsBlock.java      |    24 +-
 .../mapreduce/v2/app/webapp/NavBlock.java       |    38 +-
 .../v2/app/webapp/SingleCounterBlock.java       |    28 +-
 .../v2/app/webapp/SingleCounterPage.java        |     3 +-
 .../mapreduce/v2/app/webapp/TaskPage.java       |    20 +-
 .../mapreduce/v2/app/webapp/TasksBlock.java     |    12 +-
 .../mapreduce/v2/app/webapp/TasksPage.java      |     2 +-
 .../mapreduce/v2/hs/webapp/HsAboutPage.java     |     7 +-
 .../mapreduce/v2/hs/webapp/HsConfPage.java      |     2 +-
 .../mapreduce/v2/hs/webapp/HsCountersPage.java  |     4 +-
 .../mapreduce/v2/hs/webapp/HsJobBlock.java      |    98 +-
 .../mapreduce/v2/hs/webapp/HsJobPage.java       |     2 +-
 .../mapreduce/v2/hs/webapp/HsJobsBlock.java     |    46 +-
 .../mapreduce/v2/hs/webapp/HsLogsPage.java      |     2 +-
 .../mapreduce/v2/hs/webapp/HsNavBlock.java      |    30 +-
 .../v2/hs/webapp/HsSingleCounterPage.java       |     2 +-
 .../mapreduce/v2/hs/webapp/HsTaskPage.java      |    53 +-
 .../mapreduce/v2/hs/webapp/HsTasksBlock.java    |    57 +-
 .../mapreduce/v2/hs/webapp/HsTasksPage.java     |     2 +-
 .../hadoop/mapreduce/v2/hs/webapp/HsView.java   |     4 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |    22 +
 .../apache/hadoop/yarn/webapp/ResponseInfo.java |     4 +-
 .../hadoop/yarn/webapp/example/HelloWorld.java  |     4 +-
 .../hadoop/yarn/webapp/example/MyApp.java       |     6 +-
 .../hadoop/yarn/webapp/hamlet/Hamlet.java       |     4 +
 .../hadoop/yarn/webapp/hamlet/HamletGen.java    |     2 +
 .../hadoop/yarn/webapp/hamlet/HamletImpl.java   |     2 +
 .../hadoop/yarn/webapp/hamlet/HamletSpec.java   |     2 +
 .../hadoop/yarn/webapp/hamlet/package-info.java |     6 +
 .../hadoop/yarn/webapp/hamlet2/Hamlet.java      | 30557 +++++++++++++++++
 .../hadoop/yarn/webapp/hamlet2/HamletGen.java   |   449 +
 .../hadoop/yarn/webapp/hamlet2/HamletImpl.java  |   385 +
 .../hadoop/yarn/webapp/hamlet2/HamletSpec.java  |  3101 ++
 .../yarn/webapp/hamlet2/package-info.java       |    27 +
 .../yarn/webapp/log/AggregatedLogsBlock.java    |    66 +-
 .../yarn/webapp/log/AggregatedLogsNavBlock.java |     4 +-
 .../yarn/webapp/log/AggregatedLogsPage.java     |     2 +-
 .../hadoop/yarn/webapp/view/ErrorPage.java      |    12 +-
 .../hadoop/yarn/webapp/view/FooterBlock.java    |     2 +-
 .../hadoop/yarn/webapp/view/HeaderBlock.java    |     6 +-
 .../hadoop/yarn/webapp/view/HtmlBlock.java      |     2 +-
 .../hadoop/yarn/webapp/view/HtmlPage.java       |    12 +-
 .../hadoop/yarn/webapp/view/InfoBlock.java      |    28 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java       |    14 +-
 .../hadoop/yarn/webapp/view/LipsumBlock.java    |     4 +-
 .../hadoop/yarn/webapp/view/NavBlock.java       |    10 +-
 .../yarn/webapp/view/TwoColumnCssLayout.java    |    20 +-
 .../yarn/webapp/view/TwoColumnLayout.java       |    20 +-
 .../apache/hadoop/yarn/webapp/TestSubViews.java |    10 +-
 .../apache/hadoop/yarn/webapp/TestWebApp.java   |    28 +-
 .../hadoop/yarn/webapp/view/TestHtmlBlock.java  |    10 +-
 .../hadoop/yarn/webapp/view/TestHtmlPage.java   |     9 +-
 .../hadoop/yarn/webapp/view/TestInfoBlock.java  |     6 +-
 .../yarn/webapp/view/TestTwoColumnCssPage.java  |     6 +-
 .../webapp/AHSErrorsAndWarningsPage.java        |     2 +-
 .../webapp/AHSLogsPage.java                     |     2 +-
 .../webapp/AHSView.java                         |     4 +-
 .../webapp/AboutBlock.java                      |     8 +-
 .../webapp/AboutPage.java                       |     5 +-
 .../webapp/AppAttemptPage.java                  |     2 +-
 .../webapp/AppPage.java                         |     2 +-
 .../webapp/ContainerPage.java                   |     2 +-
 .../webapp/NavBlock.java                        |    28 +-
 .../yarn/server/webapp/AppAttemptBlock.java     |    31 +-
 .../hadoop/yarn/server/webapp/AppBlock.java     |    66 +-
 .../hadoop/yarn/server/webapp/AppsBlock.java    |    14 +-
 .../yarn/server/webapp/ContainerBlock.java      |    23 +-
 .../server/webapp/ErrorsAndWarningsBlock.java   |    50 +-
 .../nodemanager/webapp/AllApplicationsPage.java |    28 +-
 .../nodemanager/webapp/AllContainersPage.java   |    30 +-
 .../nodemanager/webapp/ApplicationPage.java     |    25 +-
 .../nodemanager/webapp/ContainerLogsPage.java   |    17 +-
 .../nodemanager/webapp/ContainerPage.java       |    31 +-
 .../webapp/NMErrorsAndWarningsPage.java         |     2 +-
 .../yarn/server/nodemanager/webapp/NMView.java  |     4 +-
 .../server/nodemanager/webapp/NavBlock.java     |    28 +-
 .../server/nodemanager/webapp/NodePage.java     |    28 +-
 .../resourcemanager/webapp/AboutBlock.java      |    20 +-
 .../resourcemanager/webapp/AboutPage.java       |     2 +-
 .../resourcemanager/webapp/AppAttemptPage.java  |     2 +-
 .../webapp/AppLogAggregationStatusPage.java     |     2 +-
 .../server/resourcemanager/webapp/AppPage.java  |     2 +-
 .../webapp/AppsBlockWithMetrics.java            |     5 +-
 .../webapp/CapacitySchedulerPage.java           |   220 +-
 .../resourcemanager/webapp/ContainerPage.java   |     2 +-
 .../webapp/DefaultSchedulerPage.java            |    58 +-
 .../resourcemanager/webapp/ErrorBlock.java      |     2 +-
 .../webapp/FairSchedulerAppsBlock.java          |    12 +-
 .../webapp/FairSchedulerPage.java               |   116 +-
 .../webapp/MetricsOverviewTable.java            |   124 +-
 .../server/resourcemanager/webapp/NavBlock.java |    34 +-
 .../resourcemanager/webapp/NodeLabelsPage.java  |    18 +-
 .../resourcemanager/webapp/NodesPage.java       |    16 +-
 .../webapp/RMAppAttemptBlock.java               |    48 +-
 .../resourcemanager/webapp/RMAppBlock.java      |    28 +-
 .../webapp/RMAppLogAggregationStatusBlock.java  |    34 +-
 .../resourcemanager/webapp/RMAppsBlock.java     |    15 +-
 .../webapp/RMErrorsAndWarningsPage.java         |     2 +-
 .../webapp/RedirectionErrorPage.java            |     2 +-
 .../server/resourcemanager/webapp/RmView.java   |     4 +-
 .../webapp/SchedulerPageUtil.java               |     8 +-
 .../webapp/SCMOverviewPage.java                 |    26 +-
 .../hadoop/yarn/server/webproxy/ProxyUtils.java |    16 +-
 .../server/webproxy/WebAppProxyServlet.java     |    14 +-
 113 files changed, 35635 insertions(+), 1101 deletions(-)
----------------------------------------------------------------------
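
The per-file diffs that follow are one mechanical rename: calls and type parameters written with the one-character identifier _ become __ (for example Page.HTML<_> becomes Page.HTML<__>), and, where needed, imports move from webapp.hamlet to the new webapp.hamlet2 copy. The motivation is that '_' is a reserved keyword from JDK 9 onward, so the old identifier no longer compiles. A toy fluent builder in the Hamlet style -- illustrative only, not the Hadoop class -- shows the pattern:

public class HamletStyleSketch {
  static class UL {
    private final StringBuilder out;
    UL(StringBuilder out) { this.out = out.append("<ul>"); }
    UL li(String text) { out.append("<li>").append(text).append("</li>"); return this; }
    // The terminator Hamlet spelled _() before JDK 9 and __() afterwards:
    // it closes the current element and pops back to the enclosing scope.
    // Naming this method _() is a compile error on JDK 9+.
    String __() { return out.append("</ul>").toString(); }
  }

  public static void main(String[] args) {
    String html = new UL(new StringBuilder())
        .li("About")
        .li("Applications")
        .__();                       // close the list, as in li()...__()
    System.out.println(html);
  }
}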


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 1c90cb9..6db1274 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -84,11 +84,11 @@ public class AppController extends Controller implements AMParams {
   public void info() {
     AppInfo info = new AppInfo(app, app.context);
     info("Application Master Overview").
-      _("Application ID:", info.getId()).
-      _("Application Name:", info.getName()).
-      _("User:", info.getUser()).
-      _("Started on:", Times.format(info.getStartTime())).
-      _("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime(
+        __("Application ID:", info.getId()).
+        __("Application Name:", info.getName()).
+        __("User:", info.getUser()).
+        __("Started on:", Times.format(info.getStartTime())).
+        __("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime(
           info.getElapsedTime() ));
     render(InfoPage.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
index 7fde95b..7a3e2b3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
@@ -25,14 +25,14 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
 
 public class AppView extends TwoColumnLayout {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "jobs");
     set(initID(DATATABLES, "jobs"), jobsTableInit());
     setTableStyles(html, "jobs");
   }
 
-  protected void commonPreHead(Page.HTML<_> html) {
+  protected void commonPreHead(Page.HTML<__> html) {
     set(ACCORDION_ID, "nav");
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
index 98a2ce1..76ef6bd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
@@ -30,10 +30,10 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -56,21 +56,21 @@ public class ConfBlock extends HtmlBlock {
     String jid = $(JOB_ID);
     if (jid.isEmpty()) {
       html.
-        p()._("Sorry, can't do anything without a JobID.")._();
+        p().__("Sorry, can't do anything without a JobID.").__();
       return;
     }
     JobId jobID = MRApps.toJobID(jid);
     Job job = appContext.getJob(jobID);
     if (job == null) {
       html.
-        p()._("Sorry, ", jid, " not found.")._();
+        p().__("Sorry, ", jid, " not found.").__();
       return;
     }
     Path confPath = job.getConfFile();
     try {
       ConfInfo info = new ConfInfo(job);
 
-      html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString())._();
+      html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString()).__();
       TBODY<TABLE<Hamlet>> tbody = html.
         // Tasks table
       table("#conf").
@@ -79,8 +79,8 @@ public class ConfBlock extends HtmlBlock {
             th(_TH, "key").
             th(_TH, "value").
             th(_TH, "source chain").
-          _().
-        _().
+              __().
+              __().
       tbody();
       for (ConfEntryInfo entry : info.getProperties()) {
         StringBuffer buffer = new StringBuffer();
@@ -100,20 +100,20 @@ public class ConfBlock extends HtmlBlock {
             td(entry.getName()).
             td(entry.getValue()).
             td(buffer.toString()).
-          _();
+            __();
       }
-      tbody._().
+      tbody.__().
       tfoot().
         tr().
-          th().input("search_init").$type(InputType.text).$name("key").$value("key")._()._().
-          th().input("search_init").$type(InputType.text).$name("value").$value("value")._()._().
-          th().input("search_init").$type(InputType.text).$name("source chain").$value("source chain")._()._().
-          _().
-        _().
-      _();
+          th().input("search_init").$type(InputType.text).$name("key").$value("key").__().__().
+          th().input("search_init").$type(InputType.text).$name("value").$value("value").__().__().
+          th().input("search_init").$type(InputType.text).$name("source chain").$value("source chain").__().__().
+          __().
+          __().
+          __();
     } catch(IOException e) {
       LOG.error("Error while reading "+confPath, e);
-      html.p()._("Sorry got an error while reading conf file. ",confPath);
+      html.p().__("Sorry got an error while reading conf file. ", confPath);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
index 568658e..4b9e6f4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
@@ -34,13 +34,13 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TD;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -60,12 +60,12 @@ public class CountersBlock extends HtmlBlock {
   @Override protected void render(Block html) {
     if (job == null) {
       html.
-        p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._();
+        p().__("Sorry, no counters for nonexistent", $(JOB_ID, "job")).__();
       return;
     }
     if (!$(TASK_ID).isEmpty() && task == null) {
       html.
-        p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._();
+        p().__("Sorry, no counters for nonexistent", $(TASK_ID, "task")).__();
       return;
     }
     
@@ -75,7 +75,7 @@ public class CountersBlock extends HtmlBlock {
         type = $(JOB_ID, "the job");
       }
       html.
-        p()._("Sorry it looks like ",type," has no counters.")._();
+        p().__("Sorry it looks like ", type, " has no counters.").__();
       return;
     }
     
@@ -97,7 +97,7 @@ public class CountersBlock extends HtmlBlock {
         thead().
           tr().
             th(".group.ui-state-default", "Counter Group").
-            th(".ui-state-default", "Counters")._()._().
+            th(".ui-state-default", "Counters").__().__().
         tbody();
     for (CounterGroup g : total) {
       CounterGroup mg = map == null ? null : map.getGroup(g.getName());
@@ -109,7 +109,7 @@ public class CountersBlock extends HtmlBlock {
       TR<THEAD<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupHeadRow = tbody.
         tr().
           th().$title(g.getName()).$class("ui-state-default").
-            _(fixGroupDisplayName(g.getDisplayName()))._().
+          __(fixGroupDisplayName(g.getDisplayName())).__().
           td().$class(C_TABLE).
             table(".dt-counters").$id(job.getID()+"."+g.getName()).
               thead().
@@ -120,20 +120,20 @@ public class CountersBlock extends HtmlBlock {
       }
       // Ditto
       TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>> group = groupHeadRow.
-            th(map == null ? "Value" : "Total")._()._().
+            th(map == null ? "Value" : "Total").__().__().
         tbody();
       for (Counter counter : g) {
         // Ditto
         TR<TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupRow = group.
           tr();
           if (task == null && mg == null && rg == null) {
-            groupRow.td().$title(counter.getName())._(counter.getDisplayName()).
-            _();
+            groupRow.td().$title(counter.getName()).__(counter.getDisplayName()).
+                __();
           } else {
             groupRow.td().$title(counter.getName()).
               a(url(urlBase,urlId,g.getName(), 
                   counter.getName()), counter.getDisplayName()).
-            _();
+                __();
           }
         if (map != null) {
           Counter mc = mg == null ? null : mg.findCounter(counter.getName());
@@ -142,11 +142,11 @@ public class CountersBlock extends HtmlBlock {
             td(mc == null ? "0" : String.format("%,d", mc.getValue())).
             td(rc == null ? "0" : String.format("%,d", rc.getValue()));
         }
-        groupRow.td(String.format("%,d", counter.getValue()))._();
+        groupRow.td(String.format("%,d", counter.getValue())).__();
       }
-      group._()._()._()._();
+      group.__().__().__().__();
     }
-    tbody._()._()._();
+    tbody.__().__().__();
   }
 
   private void getCounters(AppContext ctx) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
index d7afcd8..e780907 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
@@ -25,7 +25,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
 
 public class CountersPage extends AppView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     String tid = $(TASK_ID);
@@ -39,7 +39,7 @@ public class CountersPage extends AppView {
         "{bJQueryUI:true, sDom:'t', iDisplayLength:-1}");
   }
 
-  @Override protected void postHead(Page.HTML<_> html) {
+  @Override protected void postHead(Page.HTML<__> html) {
     html.
       style("#counters, .dt-counters { table-layout: fixed }",
             "#counters th { overflow: hidden; vertical-align: middle }",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
index 5163a01..3dd64f5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
 public class InfoPage extends AppView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     setTitle("About the Application Master");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
index a599870..77ea55e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
@@ -30,7 +30,6 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import java.util.Date;
 import java.util.List;
 
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -41,9 +40,9 @@ import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -60,14 +59,14 @@ public class JobBlock extends HtmlBlock {
     String jid = $(JOB_ID);
     if (jid.isEmpty()) {
       html.
-        p()._("Sorry, can't do anything without a JobID.")._();
+        p().__("Sorry, can't do anything without a JobID.").__();
       return;
     }
     JobId jobID = MRApps.toJobID(jid);
     Job job = appContext.getJob(jobID);
     if (job == null) {
       html.
-        p()._("Sorry, ", jid, " not found.")._();
+        p().__("Sorry, ", jid, " not found.").__();
       return;
     }
 
@@ -77,15 +76,15 @@ public class JobBlock extends HtmlBlock {
 
     JobInfo jinfo = new JobInfo(job, true);
     info("Job Overview").
-        _("Job Name:", jinfo.getName()).
-        _("User Name:", jinfo.getUserName()).
-        _("Queue Name:", jinfo.getQueueName()).
-        _("State:", jinfo.getState()).
-        _("Uberized:", jinfo.isUberized()).
-        _("Started:", new Date(jinfo.getStartTime())).
-        _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
+        __("Job Name:", jinfo.getName()).
+        __("User Name:", jinfo.getUserName()).
+        __("Queue Name:", jinfo.getQueueName()).
+        __("State:", jinfo.getState()).
+        __("Uberized:", jinfo.isUberized()).
+        __("Started:", new Date(jinfo.getStartTime())).
+        __("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
     DIV<Hamlet> div = html.
-      _(InfoBlock.class).
+        __(InfoBlock.class).
       div(_INFO_WRAP);
 
     // MRAppMasters Table
@@ -93,13 +92,13 @@ public class JobBlock extends HtmlBlock {
     table.
       tr().
       th(amString).
-      _().
+        __().
       tr().
       th(_TH, "Attempt Number").
       th(_TH, "Start Time").
       th(_TH, "Node").
       th(_TH, "Logs").
-      _();
+        __();
     for (AMInfo amInfo : amInfos) {
       AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
           jinfo.getId(), jinfo.getUserName());
@@ -109,14 +108,14 @@ public class JobBlock extends HtmlBlock {
         td(new Date(attempt.getStartTime()).toString()).
         td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(),
             attempt.getNodeHttpAddress()),
-            attempt.getNodeHttpAddress())._().
+            attempt.getNodeHttpAddress()).__().
         td().a(".logslink", url(attempt.getLogsLink()), 
-            "logs")._().
-        _();
+            "logs").__().
+          __();
     }
 
-    table._();
-    div._();
+    table.__();
+    div.__();
 
     html.div(_INFO_WRAP).        
       // Tasks table
@@ -127,30 +126,30 @@ public class JobBlock extends HtmlBlock {
             th(_TH, "Total").
             th(_TH, "Pending").
             th(_TH, "Running").
-            th(_TH, "Complete")._().
+            th(_TH, "Complete").__().
           tr(_ODD).
             th("Map").
             td().
               div(_PROGRESSBAR).
                 $title(join(jinfo.getMapProgressPercent(), '%')). // tooltip
                 div(_PROGRESSBAR_VALUE).
-                  $style(join("width:", jinfo.getMapProgressPercent(), '%'))._()._()._().
-            td().a(url("tasks", jid, "m", "ALL"),String.valueOf(jinfo.getMapsTotal()))._().
-            td().a(url("tasks", jid, "m", "PENDING"),String.valueOf(jinfo.getMapsPending()))._().
-            td().a(url("tasks", jid, "m", "RUNNING"),String.valueOf(jinfo.getMapsRunning()))._().
-            td().a(url("tasks", jid, "m", "COMPLETED"),String.valueOf(jinfo.getMapsCompleted()))._()._().
+                  $style(join("width:", jinfo.getMapProgressPercent(), '%')).__().__().__().
+            td().a(url("tasks", jid, "m", "ALL"), String.valueOf(jinfo.getMapsTotal())).__().
+            td().a(url("tasks", jid, "m", "PENDING"), String.valueOf(jinfo.getMapsPending())).__().
+            td().a(url("tasks", jid, "m", "RUNNING"), String.valueOf(jinfo.getMapsRunning())).__().
+            td().a(url("tasks", jid, "m", "COMPLETED"), String.valueOf(jinfo.getMapsCompleted())).__().__().
           tr(_EVEN).
             th("Reduce").
             td().
               div(_PROGRESSBAR).
                 $title(join(jinfo.getReduceProgressPercent(), '%')). // tooltip
                 div(_PROGRESSBAR_VALUE).
-                  $style(join("width:", jinfo.getReduceProgressPercent(), '%'))._()._()._().
-            td().a(url("tasks", jid, "r", "ALL"),String.valueOf(jinfo.getReducesTotal()))._().
-            td().a(url("tasks", jid, "r", "PENDING"),String.valueOf(jinfo.getReducesPending()))._().
-            td().a(url("tasks", jid, "r", "RUNNING"),String.valueOf(jinfo.getReducesRunning()))._().
-            td().a(url("tasks", jid, "r", "COMPLETED"),String.valueOf(jinfo.getReducesCompleted()))._()._()
-          ._().
+                  $style(join("width:", jinfo.getReduceProgressPercent(), '%')).__().__().__().
+            td().a(url("tasks", jid, "r", "ALL"), String.valueOf(jinfo.getReducesTotal())).__().
+            td().a(url("tasks", jid, "r", "PENDING"), String.valueOf(jinfo.getReducesPending())).__().
+            td().a(url("tasks", jid, "r", "RUNNING"), String.valueOf(jinfo.getReducesRunning())).__().
+            td().a(url("tasks", jid, "r", "COMPLETED"), String.valueOf(jinfo.getReducesCompleted())).__().__()
+          .__().
         // Attempts table
         table("#job").
         tr().
@@ -159,45 +158,45 @@ public class JobBlock extends HtmlBlock {
           th(_TH, "Running").
           th(_TH, "Failed").
           th(_TH, "Killed").
-          th(_TH, "Successful")._().
+          th(_TH, "Successful").__().
         tr(_ODD).
           th("Maps").
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.NEW.toString()),
-              String.valueOf(jinfo.getNewMapAttempts()))._().
+              String.valueOf(jinfo.getNewMapAttempts())).__().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.RUNNING.toString()),
-              String.valueOf(jinfo.getRunningMapAttempts()))._().
+              String.valueOf(jinfo.getRunningMapAttempts())).__().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.FAILED.toString()),
-              String.valueOf(jinfo.getFailedMapAttempts()))._().
+              String.valueOf(jinfo.getFailedMapAttempts())).__().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.KILLED.toString()),
-              String.valueOf(jinfo.getKilledMapAttempts()))._().
+              String.valueOf(jinfo.getKilledMapAttempts())).__().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.SUCCESSFUL.toString()),
-              String.valueOf(jinfo.getSuccessfulMapAttempts()))._().
-        _().
+              String.valueOf(jinfo.getSuccessfulMapAttempts())).__().
+        __().
         tr(_EVEN).
           th("Reduces").
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.NEW.toString()),
-              String.valueOf(jinfo.getNewReduceAttempts()))._().
+              String.valueOf(jinfo.getNewReduceAttempts())).__().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.RUNNING.toString()),
-              String.valueOf(jinfo.getRunningReduceAttempts()))._().
+              String.valueOf(jinfo.getRunningReduceAttempts())).__().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.FAILED.toString()),
-              String.valueOf(jinfo.getFailedReduceAttempts()))._().
+              String.valueOf(jinfo.getFailedReduceAttempts())).__().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.KILLED.toString()),
-              String.valueOf(jinfo.getKilledReduceAttempts()))._().
+              String.valueOf(jinfo.getKilledReduceAttempts())).__().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.SUCCESSFUL.toString()),
-              String.valueOf(jinfo.getSuccessfulReduceAttempts()))._().
-         _().
-       _().
-     _();
+              String.valueOf(jinfo.getSuccessfulReduceAttempts())).__().
+        __().
+        __().
+        __();
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
index 983859e..4d6a3e2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
@@ -27,7 +27,6 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.postInitID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 
-import org.apache.hadoop.mapreduce.v2.app.webapp.ConfBlock;
 import org.apache.hadoop.yarn.webapp.SubView;
 
 /**
@@ -39,7 +38,7 @@ public class JobConfPage extends AppView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     String jobID = $(JOB_ID);
     set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
         : join("Configuration for MapReduce Job ", $(JOB_ID)));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
index 00f4750..6508fb8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.webapp.SubView;
 
 public class JobPage extends AppView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     String jobID = $(JOB_ID);
     set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
                : join("MapReduce Job ", $(JOB_ID)));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
index 720219e..ff4bc00 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
@@ -25,9 +25,9 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -53,34 +53,34 @@ public class JobsBlock extends HtmlBlock {
             th("Maps Completed").
             th("Reduce Progress").
             th("Reduces Total").
-            th("Reduces Completed")._()._().
+            th("Reduces Completed").__().__().
         tbody();
     for (Job j : appContext.getAllJobs().values()) {
       JobInfo job = new JobInfo(j, false);
       tbody.
         tr().
           td().
-            span().$title(String.valueOf(job.getId()))._(). // for sorting
-            a(url("job", job.getId()), job.getId())._().
+            span().$title(String.valueOf(job.getId())).__(). // for sorting
+            a(url("job", job.getId()), job.getId()).__().
           td(job.getName()).
           td(job.getState()).
           td().
-            span().$title(job.getMapProgressPercent())._(). // for sorting
+            span().$title(job.getMapProgressPercent()).__(). // for sorting
             div(_PROGRESSBAR).
               $title(join(job.getMapProgressPercent(), '%')). // tooltip
               div(_PROGRESSBAR_VALUE).
-                $style(join("width:", job.getMapProgressPercent(), '%'))._()._()._().
+                $style(join("width:", job.getMapProgressPercent(), '%')).__().__().__().
           td(String.valueOf(job.getMapsTotal())).
           td(String.valueOf(job.getMapsCompleted())).
           td().
-            span().$title(job.getReduceProgressPercent())._(). // for sorting
+            span().$title(job.getReduceProgressPercent()).__(). // for sorting
             div(_PROGRESSBAR).
               $title(join(job.getReduceProgressPercent(), '%')). // tooltip
               div(_PROGRESSBAR_VALUE).
-                $style(join("width:", job.getReduceProgressPercent(), '%'))._()._()._().
+                $style(join("width:", job.getReduceProgressPercent(), '%')).__().__().__().
           td(String.valueOf(job.getReducesTotal())).
-          td(String.valueOf(job.getReducesCompleted()))._();
+          td(String.valueOf(job.getReducesCompleted())).__();
     }
-    tbody._()._();
+    tbody.__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
index 4eed7e3..58e1a43 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
@@ -25,8 +25,8 @@ import java.util.List;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -42,13 +42,13 @@ public class NavBlock extends HtmlBlock {
       div("#nav").
         h3("Cluster").
         ul().
-          li().a(url(rmweb, "cluster", "cluster"), "About")._().
-          li().a(url(rmweb, "cluster", "apps"), "Applications")._().
-          li().a(url(rmweb, "cluster", "scheduler"), "Scheduler")._()._().
+          li().a(url(rmweb, "cluster", "cluster"), "About").__().
+          li().a(url(rmweb, "cluster", "apps"), "Applications").__().
+          li().a(url(rmweb, "cluster", "scheduler"), "Scheduler").__().__().
         h3("Application").
         ul().
-          li().a(url("app/info"), "About")._().
-          li().a(url("app"), "Jobs")._()._();
+          li().a(url("app/info"), "About").__().
+          li().a(url("app"), "Jobs").__().__();
     if (app.getJob() != null) {
       String jobid = MRApps.toString(app.getJob().getID());
       List<AMInfo> amInfos = app.getJob().getAMInfos();
@@ -58,31 +58,31 @@ public class NavBlock extends HtmlBlock {
       nav.
         h3("Job").
         ul().
-          li().a(url("job", jobid), "Overview")._().
-          li().a(url("jobcounters", jobid), "Counters")._().
-          li().a(url("conf", jobid), "Configuration")._().
-          li().a(url("tasks", jobid, "m"), "Map tasks")._().
-          li().a(url("tasks", jobid, "r"), "Reduce tasks")._().
+          li().a(url("job", jobid), "Overview").__().
+          li().a(url("jobcounters", jobid), "Counters").__().
+          li().a(url("conf", jobid), "Configuration").__().
+          li().a(url("tasks", jobid, "m"), "Map tasks").__().
+          li().a(url("tasks", jobid, "r"), "Reduce tasks").__().
           li().a(".logslink", url(MRWebAppUtil.getYARNWebappScheme(),
               nodeHttpAddress, "node",
               "containerlogs", thisAmInfo.getContainerId().toString(), 
               app.getJob().getUserName()), 
-              "AM Logs")._()._();
+              "AM Logs").__().__();
       if (app.getTask() != null) {
         String taskid = MRApps.toString(app.getTask().getID());
         nav.
           h3("Task").
           ul().
-            li().a(url("task", taskid), "Task Overview")._().
-            li().a(url("taskcounters", taskid), "Counters")._()._();
+            li().a(url("task", taskid), "Task Overview").__().
+            li().a(url("taskcounters", taskid), "Counters").__().__();
       }
     }
     nav.
       h3("Tools").
       ul().
-        li().a("/conf", "Configuration")._().
-        li().a("/logs", "Local logs")._().
-        li().a("/stacks", "Server stacks")._().
-        li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._();
+        li().a("/conf", "Configuration").__().
+        li().a("/logs", "Local logs").__().
+        li().a("/stacks", "Server stacks").__().
+        li().a("/jmx?qry=Hadoop:*", "Server metrics").__().__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java
index c4311e9..02fb226 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java
@@ -39,11 +39,11 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -62,12 +62,12 @@ public class SingleCounterBlock extends HtmlBlock {
   @Override protected void render(Block html) {
     if (job == null) {
       html.
-        p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._();
+        p().__("Sorry, no counters for nonexistent", $(JOB_ID, "job")).__();
       return;
     }
     if (!$(TASK_ID).isEmpty() && task == null) {
       html.
-        p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._();
+        p().__("Sorry, no counters for nonexistent", $(TASK_ID, "task")).__();
       return;
     }
     
@@ -79,7 +79,7 @@ public class SingleCounterBlock extends HtmlBlock {
         thead().
           tr().
             th(".ui-state-default", columnType).
-            th(".ui-state-default", "Value")._()._().
+            th(".ui-state-default", "Value").__().__().
           tbody();
     for (Map.Entry<String, Long> entry : values.entrySet()) {
       TR<TBODY<TABLE<DIV<Hamlet>>>> row = tbody.tr();
@@ -87,16 +87,16 @@ public class SingleCounterBlock extends HtmlBlock {
       String val = entry.getValue().toString();
       if(task != null) {
         row.td(id);
-        row.td().br().$title(val)._()._(val)._();
+        row.td().br().$title(val).__().__(val).__();
       } else {
         row.td().a(url("singletaskcounter",entry.getKey(),
-            $(COUNTER_GROUP), $(COUNTER_NAME)), id)._();
-        row.td().br().$title(val)._().a(url("singletaskcounter",entry.getKey(),
-            $(COUNTER_GROUP), $(COUNTER_NAME)), val)._();
+            $(COUNTER_GROUP), $(COUNTER_NAME)), id).__();
+        row.td().br().$title(val).__().a(url("singletaskcounter", entry.getKey(),
+            $(COUNTER_GROUP), $(COUNTER_NAME)), val).__();
       }
-      row._();
+      row.__();
     }
-    tbody._()._()._();
+    tbody.__().__().__();
   }
 
   private void populateMembers(AppContext ctx) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java
index 729b5a8..6fc1f82 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
 
-import org.apache.hadoop.mapreduce.v2.app.webapp.SingleCounterBlock;
 import org.apache.hadoop.yarn.webapp.SubView;
 
 /**
@@ -33,7 +32,7 @@ public class SingleCounterPage extends AppView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     String tid = $(TASK_ID);
     String activeNav = "3";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index 01c5b0d..bd7f7a9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -38,11 +38,11 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -100,7 +100,7 @@ public class TaskPage extends AppView {
             .append("  }\n")
             .append("}\n");
 
-        html.script().$type("text/javascript")._(script.toString())._();
+        html.script().$type("text/javascript").__(script.toString()).__();
       }
 
       TR<THEAD<TABLE<Hamlet>>> tr = html.table("#attempts").thead().tr();
@@ -118,7 +118,7 @@ public class TaskPage extends AppView {
         tr.th(".actions", "Actions");
       }
 
-      TBODY<TABLE<Hamlet>> tbody = tr._()._().tbody();
+      TBODY<TABLE<Hamlet>> tbody = tr.__().__().tbody();
       // Write all the data into a JavaScript array of arrays for JQuery
       // DataTables to display
       StringBuilder attemptsTableData = new StringBuilder("[\n");
@@ -178,9 +178,9 @@ public class TaskPage extends AppView {
       }
       attemptsTableData.append("]");
       html.script().$type("text/javascript").
-      _("var attemptsTableData=" + attemptsTableData)._();
+          __("var attemptsTableData=" + attemptsTableData).__();
 
-      tbody._()._();
+      tbody.__().__();
 
     }
 
@@ -197,7 +197,7 @@ public class TaskPage extends AppView {
     }
   }
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:3}");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
index 78338ec..8d92dd3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
@@ -30,9 +30,9 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -65,7 +65,7 @@ public class TasksBlock extends HtmlBlock {
             th("State").
             th("Start Time").
             th("Finish Time").
-            th("Elapsed Time")._()._().
+            th("Elapsed Time").__().__().
         tbody();
     StringBuilder tasksTableData = new StringBuilder("[\n");
 
@@ -117,8 +117,8 @@ public class TasksBlock extends HtmlBlock {
     }
     tasksTableData.append("]");
     html.script().$type("text/javascript").
-    _("var tasksTableData=" + tasksTableData)._();
+        __("var tasksTableData=" + tasksTableData).__();
 
-    tbody._()._();
+    tbody.__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
index e2f12dc..8fce395 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
@@ -24,7 +24,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
 
 public class TasksPage extends AppView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "tasks");
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java
index f607599..d544c6b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
 
-import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.SubView;
@@ -36,7 +35,7 @@ public class HsAboutPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     //override the nav config from commonPReHead
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
@@ -49,9 +48,9 @@ public class HsAboutPage extends HsView {
   @Override protected Class<? extends SubView> content() {
     HistoryInfo info = new HistoryInfo();
     info("History Server").
-      _("BuildVersion", info.getHadoopBuildVersion()
+        __("BuildVersion", info.getHadoopBuildVersion()
         + " on " + info.getHadoopVersionBuiltOn()).
-      _("History Server started on", Times.format(info.getStartedOn()));
+        __("History Server started on", Times.format(info.getStartedOn()));
     return InfoBlock.class;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java
index 8431e22..c08ee5c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java
@@ -39,7 +39,7 @@ public class HsConfPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     String jobID = $(JOB_ID);
     set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
         : join("Configuration for MapReduce Job ", $(JOB_ID)));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java
index e70a668..1632a97 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java
@@ -32,7 +32,7 @@ public class HsCountersPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     setActiveNavColumnForTask();
     set(DATATABLES_SELECTOR, "#counters .dt-counters");
@@ -44,7 +44,7 @@ public class HsCountersPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#postHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void postHead(Page.HTML<_> html) {
+  @Override protected void postHead(Page.HTML<__> html) {
     html.
       style("#counters, .dt-counters { table-layout: fixed }",
             "#counters th { overflow: hidden; vertical-align: middle }",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index 0d5b03a..18040f0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -43,9 +43,9 @@ import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -69,38 +69,38 @@ public class HsJobBlock extends HtmlBlock {
     String jid = $(JOB_ID);
     if (jid.isEmpty()) {
       html.
-        p()._("Sorry, can't do anything without a JobID.")._();
+        p().__("Sorry, can't do anything without a JobID.").__();
       return;
     }
     JobId jobID = MRApps.toJobID(jid);
     Job j = appContext.getJob(jobID);
     if (j == null) {
-      html.p()._("Sorry, ", jid, " not found.")._();
+      html.p().__("Sorry, ", jid, " not found.").__();
       return;
     }
     if(j instanceof UnparsedJob) {
       final int taskCount = j.getTotalMaps() + j.getTotalReduces();
       UnparsedJob oversizedJob = (UnparsedJob) j;
-      html.p()._("The job has a total of " + taskCount + " tasks. ")
-          ._("Any job larger than " + oversizedJob.getMaxTasksAllowed() +
-              " will not be loaded.")._();
-      html.p()._("You can either use the CLI tool: 'mapred job -history'"
+      html.p().__("The job has a total of " + taskCount + " tasks. ")
+          .__("Any job larger than " + oversizedJob.getMaxTasksAllowed() +
+              " will not be loaded.").__();
+      html.p().__("You can either use the CLI tool: 'mapred job -history'"
           + " to view large jobs or adjust the property " +
-          JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX + ".")._();
+          JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX + ".").__();
       return;
     }
     List<AMInfo> amInfos = j.getAMInfos();
     JobInfo job = new JobInfo(j);
     ResponseInfo infoBlock = info("Job Overview").
-        _("Job Name:", job.getName()).
-        _("User Name:", job.getUserName()).
-        _("Queue:", job.getQueueName()).
-        _("State:", job.getState()).
-        _("Uberized:", job.isUber()).
-        _("Submitted:", new Date(job.getSubmitTime())).
-        _("Started:", job.getStartTimeStr()).
-        _("Finished:", new Date(job.getFinishTime())).
-        _("Elapsed:", StringUtils.formatTime(
+        __("Job Name:", job.getName()).
+        __("User Name:", job.getUserName()).
+        __("Queue:", job.getQueueName()).
+        __("State:", job.getState()).
+        __("Uberized:", job.isUber()).
+        __("Submitted:", new Date(job.getSubmitTime())).
+        __("Started:", job.getStartTimeStr()).
+        __("Finished:", new Date(job.getFinishTime())).
+        __("Elapsed:", StringUtils.formatTime(
             Times.elapsed(job.getStartTime(), job.getFinishTime(), false)));
     
     String amString =
@@ -117,19 +117,19 @@ public class HsJobBlock extends HtmlBlock {
     }
 
     if(job.getNumMaps() > 0) {
-      infoBlock._("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
+      infoBlock.__("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
     }
     if(job.getNumReduces() > 0) {
-      infoBlock._("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
-      infoBlock._("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
-      infoBlock._("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
+      infoBlock.__("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
+      infoBlock.__("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
+      infoBlock.__("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
     }
 
     for (ConfEntryInfo entry : job.getAcls()) {
-      infoBlock._("ACL "+entry.getName()+":", entry.getValue());
+      infoBlock.__("ACL "+entry.getName()+":", entry.getValue());
     }
     DIV<Hamlet> div = html.
-      _(InfoBlock.class).
+        __(InfoBlock.class).
       div(_INFO_WRAP);
     
       // MRAppMasters Table
@@ -137,13 +137,13 @@ public class HsJobBlock extends HtmlBlock {
         table.
           tr().
             th(amString).
-          _().
+            __().
           tr().
             th(_TH, "Attempt Number").
             th(_TH, "Start Time").
             th(_TH, "Node").
             th(_TH, "Logs").
-            _();
+            __();
         boolean odd = false;
           for (AMInfo amInfo : amInfos) {
             AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
@@ -153,13 +153,13 @@ public class HsJobBlock extends HtmlBlock {
               td(new Date(attempt.getStartTime()).toString()).
               td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(),
                   attempt.getNodeHttpAddress()),
-                  attempt.getNodeHttpAddress())._().
+                  attempt.getNodeHttpAddress()).__().
               td().a(".logslink", url(attempt.getLogsLink()),
-                      "logs")._().
-            _();
+                      "logs").__().
+                __();
           }
-          table._();
-          div._();
+          table.__();
+          div.__();
           
         
         html.div(_INFO_WRAP).        
@@ -169,18 +169,18 @@ public class HsJobBlock extends HtmlBlock {
           tr().
             th(_TH, "Task Type").
             th(_TH, "Total").
-            th(_TH, "Complete")._().
+            th(_TH, "Complete").__().
           tr(_ODD).
             th().
-              a(url("tasks", jid, "m"), "Map")._().
+              a(url("tasks", jid, "m"), "Map").__().
             td(String.valueOf(String.valueOf(job.getMapsTotal()))).
-            td(String.valueOf(String.valueOf(job.getMapsCompleted())))._().
+            td(String.valueOf(String.valueOf(job.getMapsCompleted()))).__().
           tr(_EVEN).
             th().
-              a(url("tasks", jid, "r"), "Reduce")._().
+              a(url("tasks", jid, "r"), "Reduce").__().
             td(String.valueOf(String.valueOf(job.getReducesTotal()))).
-            td(String.valueOf(String.valueOf(job.getReducesCompleted())))._()
-          ._().
+            td(String.valueOf(String.valueOf(job.getReducesCompleted()))).__()
+          .__().
 
         // Attempts table
         table("#job").
@@ -188,33 +188,33 @@ public class HsJobBlock extends HtmlBlock {
           th(_TH, "Attempt Type").
           th(_TH, "Failed").
           th(_TH, "Killed").
-          th(_TH, "Successful")._().
+          th(_TH, "Successful").__().
         tr(_ODD).
           th("Maps").
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.FAILED.toString()), 
-              String.valueOf(job.getFailedMapAttempts()))._().
+              String.valueOf(job.getFailedMapAttempts())).__().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.KILLED.toString()), 
-              String.valueOf(job.getKilledMapAttempts()))._().
+              String.valueOf(job.getKilledMapAttempts())).__().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.SUCCESSFUL.toString()), 
-              String.valueOf(job.getSuccessfulMapAttempts()))._().
-        _().
+              String.valueOf(job.getSuccessfulMapAttempts())).__().
+            __().
         tr(_EVEN).
           th("Reduces").
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.FAILED.toString()), 
-              String.valueOf(job.getFailedReduceAttempts()))._().
+              String.valueOf(job.getFailedReduceAttempts())).__().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.KILLED.toString()), 
-              String.valueOf(job.getKilledReduceAttempts()))._().
+              String.valueOf(job.getKilledReduceAttempts())).__().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.SUCCESSFUL.toString()), 
-              String.valueOf(job.getSuccessfulReduceAttempts()))._().
-         _().
-       _().
-     _();
+              String.valueOf(job.getSuccessfulReduceAttempts())).__().
+            __().
+            __().
+            __();
   }
 
   static String addTaskLinks(String text) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java
index 4c81a13..f40c878 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java
@@ -34,7 +34,7 @@ public class HsJobPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     String jobID = $(JOB_ID);
     set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
                : join("MapReduce Job ", $(JOB_ID)));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
index b234ca3..ef563f6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java
@@ -27,10 +27,10 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.util.Times;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -69,7 +69,7 @@ public class HsJobsBlock extends HtmlBlock {
             th("Maps Completed").
             th("Reduces Total").
             th("Reduces Completed").
-            th("Elapsed Time")._()._().
+            th("Elapsed Time").__().__().
         tbody();
     LOG.info("Getting list of all Jobs.");
     // Write all the data into a JavaScript array of arrays for JQuery
@@ -105,38 +105,38 @@ public class HsJobsBlock extends HtmlBlock {
     }
     jobsTableData.append("]");
     html.script().$type("text/javascript").
-    _("var jobsTableData=" + jobsTableData)._();
-    tbody._().
+        __("var jobsTableData=" + jobsTableData).__();
+    tbody.__().
     tfoot().
       tr().
         th().input("search_init").$type(InputType.text)
-          .$name("submit_time").$value("Submit Time")._()._().
+          .$name("submit_time").$value("Submit Time").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("start_time").$value("Start Time")._()._().
+          .$name("start_time").$value("Start Time").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("finish_time").$value("Finish Time")._()._().
+          .$name("finish_time").$value("Finish Time").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("job_id").$value("Job ID")._()._().
+          .$name("job_id").$value("Job ID").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("name").$value("Name")._()._().
+          .$name("name").$value("Name").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("user").$value("User")._()._().
+          .$name("user").$value("User").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("queue").$value("Queue")._()._().
+          .$name("queue").$value("Queue").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("state").$value("State")._()._().
+          .$name("state").$value("State").__().__().
         th().input("search_init").$type(InputType.text)
-          .$name("maps_total").$value("Maps Total")._()._().
+          .$name("maps_total").$value("Maps Total").__().__().
         th().input("search_init").$type(InputType.text).
-          $name("maps_completed").$value("Maps Completed")._()._().
+          $name("maps_completed").$value("Maps Completed").__().__().
         th().input("search_init").$type(InputType.text).
-          $name("reduces_total").$value("Reduces Total")._()._().
+          $name("reduces_total").$value("Reduces Total").__().__().
         th().input("search_init").$type(InputType.text).
-          $name("reduces_completed").$value("Reduces Completed")._()._().
+          $name("reduces_completed").$value("Reduces Completed").__().__().
         th().input("search_init").$type(InputType.text).
-          $name("elapsed_time").$value("Elapsed Time")._()._().
-        _().
-      _().
-    _();
+          $name("elapsed_time").$value("Elapsed Time").__().__().
+        __().
+        __().
+        __();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java
index f483dc9..2bee3ba 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java
@@ -26,7 +26,7 @@ public class HsLogsPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     setActiveNavColumnForTask();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
index 7e49d52..9ef5a0f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 
 import org.apache.hadoop.mapreduce.v2.app.webapp.App;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -43,33 +43,33 @@ public class HsNavBlock extends HtmlBlock {
       div("#nav").
       h3("Application").
         ul().
-          li().a(url("about"), "About")._().
-          li().a(url("app"), "Jobs")._()._();
+          li().a(url("about"), "About").__().
+          li().a(url("app"), "Jobs").__().__();
     if (app.getJob() != null) {
       String jobid = MRApps.toString(app.getJob().getID());
       nav.
         h3("Job").
         ul().
-          li().a(url("job", jobid), "Overview")._().
-          li().a(url("jobcounters", jobid), "Counters")._().
-          li().a(url("conf", jobid), "Configuration")._().
-          li().a(url("tasks", jobid, "m"), "Map tasks")._().
-          li().a(url("tasks", jobid, "r"), "Reduce tasks")._()._();
+          li().a(url("job", jobid), "Overview").__().
+          li().a(url("jobcounters", jobid), "Counters").__().
+          li().a(url("conf", jobid), "Configuration").__().
+          li().a(url("tasks", jobid, "m"), "Map tasks").__().
+          li().a(url("tasks", jobid, "r"), "Reduce tasks").__().__();
       if (app.getTask() != null) {
         String taskid = MRApps.toString(app.getTask().getID());
         nav.
           h3("Task").
           ul().
-            li().a(url("task", taskid), "Task Overview")._().
-            li().a(url("taskcounters", taskid), "Counters")._()._();
+            li().a(url("task", taskid), "Task Overview").__().
+            li().a(url("taskcounters", taskid), "Counters").__().__();
       }
     }
     nav.
       h3("Tools").
         ul().
-          li().a("/conf", "Configuration")._().
-          li().a("/logs", "Local logs")._().
-          li().a("/stacks", "Server stacks")._().
-          li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._();
+          li().a("/conf", "Configuration").__().
+          li().a("/logs", "Local logs").__().
+          li().a("/stacks", "Server stacks").__().
+          li().a("/jmx?qry=Hadoop:*", "Server metrics").__().__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java
index 5f97b8f..bc2c2c8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java
@@ -32,7 +32,7 @@ public class HsSingleCounterPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     setActiveNavColumnForTask();
     set(DATATABLES_ID, "singleCounter");




[43/50] [abbrv] hadoop git commit: HADOOP-14677. mvn clean compile fails. Contributed by Andras Bokor.

Posted by xg...@apache.org.
HADOOP-14677. mvn clean compile fails. Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fd6d0f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fd6d0f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fd6d0f2

Branch: refs/heads/YARN-5734
Commit: 0fd6d0f2d361536b2baf859ddbb082eb2eadcfcf
Parents: 481385e
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jul 31 16:59:24 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jul 31 17:00:10 2017 +0900

----------------------------------------------------------------------
 hadoop-maven-plugins/pom.xml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fd6d0f2/hadoop-maven-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index 9831456..2ff93f7 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -80,13 +80,16 @@
   <build>
     <plugins>
       <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+      </plugin>
+      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-plugin-plugin</artifactId>
         <version>${maven.plugin-tools.version}</version>
         <executions>
           <execution>
             <id>default-descriptor</id>
-            <phase>process-classes</phase>
+            <phase>compile</phase>
           </execution>
         </executions>
       </plugin>
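
For context on why this fixes "mvn clean compile": hadoop-maven-plugins is consumed as a build plugin by other modules in the same reactor, and an invocation that stops at the compile phase never reaches process-classes, so the plugin descriptor was never generated and downstream modules could not resolve the plugin. Rebinding the default-descriptor execution to the compile phase (together with the explicit maven-compiler-plugin entry) makes the descriptor available early enough. This reading is inferred from the diff rather than stated in the commit message; with the patch applied, the following should succeed from the source root:

    mvn clean compile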




[50/50] [abbrv] hadoop git commit: YARN-5953: Create CLI for changing YARN configurations. (Jonathan Hung via xgong)

Posted by xg...@apache.org.
YARN-5953: Create CLI for changing YARN configurations. (Jonathan Hung via xgong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7915ee3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7915ee3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7915ee3d

Branch: refs/heads/YARN-5734
Commit: 7915ee3da856f31529a1e08563339a6a70b46ba4
Parents: e69113b
Author: Xuan <xg...@apache.org>
Authored: Fri Jul 7 14:16:46 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 31 08:59:25 2017 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/hadoop-yarn/bin/yarn        |   4 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd    |   5 +
 .../hadoop/yarn/client/cli/SchedConfCLI.java    | 238 +++++++++++++++++++
 .../yarn/client/cli/TestSchedConfCLI.java       | 160 +++++++++++++
 .../hadoop/yarn/webapp/dao/package-info.java    |  27 +++
 .../yarn/webapp/util/YarnWebServiceUtils.java   |  14 ++
 .../ConfigurationMutationACLPolicy.java         |   2 +-
 .../DefaultConfigurationMutationACLPolicy.java  |   2 +-
 .../scheduler/MutableConfScheduler.java         |   2 +-
 .../scheduler/MutableConfigurationProvider.java |   2 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +-
 .../conf/MutableCSConfigurationProvider.java    |   4 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |   4 +-
 .../resourcemanager/webapp/RMWebServices.java   |   1 +
 .../webapp/dao/QueueConfigInfo.java             |   4 +-
 .../webapp/dao/SchedConfUpdateInfo.java         |  18 +-
 .../TestConfigurationMutationACLPolicies.java   |   4 +-
 .../TestMutableCSConfigurationProvider.java     |   4 +-
 .../TestRMWebServicesConfigurationMutation.java |  65 +++--
 19 files changed, 508 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index cf6457b..21656fe 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -46,6 +46,7 @@ function hadoop_usage
   hadoop_add_subcommand "queue" "prints queue information"
   hadoop_add_subcommand "resourcemanager" "run the ResourceManager"
   hadoop_add_subcommand "rmadmin" "admin tools"
+  hadoop_add_subcommand "schedconf" "modify scheduler configuration"
   hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
   hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager daemon"
   hadoop_add_subcommand "timelinereader" "run the timeline reader server"
@@ -137,6 +138,9 @@ function yarncmd_case
     rmadmin)
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
     ;;
+    schedconf)
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.SchedConfCLI'
+    ;;
     scmadmin)
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
     ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index ca879f5..8b72394 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -285,6 +285,11 @@ goto :eof
   set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
   goto :eof
 
+:schedconf
+  set CLASS=org.apache.hadoop.yarn.client.cli.SchedConfCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
new file mode 100644
index 0000000..e17062e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -0,0 +1,238 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client.cli;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response.Status;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * CLI for modifying scheduler configuration.
+ */
+@Public
+@Evolving
+public class SchedConfCLI extends Configured implements Tool {
+
+  private static final String ADD_QUEUES_OPTION = "addQueues";
+  private static final String REMOVE_QUEUES_OPTION = "removeQueues";
+  private static final String UPDATE_QUEUES_OPTION = "updateQueues";
+  private static final String GLOBAL_OPTIONS = "globalUpdates";
+  private static final String HELP_CMD = "help";
+
+  private static final String CONF_ERR_MSG = "Specify configuration key " +
+      "value as confKey=confVal.";
+
+  public SchedConfCLI() {
+    super(new YarnConfiguration());
+  }
+
+  public static void main(String[] args) throws Exception {
+    SchedConfCLI cli = new SchedConfCLI();
+    int exitCode = cli.run(args);
+    System.exit(exitCode);
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    Options opts = new Options();
+
+    opts.addOption("add", ADD_QUEUES_OPTION, true,
+        "Add queues with configurations");
+    opts.addOption("remove", REMOVE_QUEUES_OPTION, true,
+        "Remove queues");
+    opts.addOption("update", UPDATE_QUEUES_OPTION, true,
+        "Update queue configurations");
+    opts.addOption("global", GLOBAL_OPTIONS, true,
+        "Update global scheduler configurations");
+    opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
+
+    int exitCode = -1;
+    CommandLine parsedCli = null;
+    try {
+      parsedCli = new GnuParser().parse(opts, args);
+    } catch (MissingArgumentException ex) {
+      System.err.println("Missing argument for options");
+      printUsage();
+      return exitCode;
+    }
+
+    if (parsedCli.hasOption(HELP_CMD)) {
+      printUsage();
+      return 0;
+    }
+
+    boolean hasOption = false;
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
+    try {
+      if (parsedCli.hasOption(ADD_QUEUES_OPTION)) {
+        hasOption = true;
+        addQueues(parsedCli.getOptionValue(ADD_QUEUES_OPTION), updateInfo);
+      }
+      if (parsedCli.hasOption(REMOVE_QUEUES_OPTION)) {
+        hasOption = true;
+        removeQueues(parsedCli.getOptionValue(REMOVE_QUEUES_OPTION),
+            updateInfo);
+      }
+      if (parsedCli.hasOption(UPDATE_QUEUES_OPTION)) {
+        hasOption = true;
+        updateQueues(parsedCli.getOptionValue(UPDATE_QUEUES_OPTION),
+            updateInfo);
+      }
+      if (parsedCli.hasOption(GLOBAL_OPTIONS)) {
+        hasOption = true;
+        globalUpdates(parsedCli.getOptionValue(GLOBAL_OPTIONS), updateInfo);
+      }
+    } catch (IllegalArgumentException e) {
+      System.err.println(e.getMessage());
+      return -1;
+    }
+
+    if (!hasOption) {
+      System.err.println("Invalid Command Usage: ");
+      printUsage();
+      return -1;
+    }
+
+    Client webServiceClient = Client.create();
+    WebResource webResource = webServiceClient.resource(WebAppUtils.
+        getRMWebAppURLWithScheme(getConf()));
+    ClientResponse response = webResource.path("ws").path("v1").path("cluster")
+        .path("sched-conf").accept(MediaType.APPLICATION_JSON)
+        .entity(YarnWebServiceUtils.toJson(updateInfo,
+            SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
+        .put(ClientResponse.class);
+    if (response != null) {
+      if (response.getStatus() == Status.OK.getStatusCode()) {
+        System.out.println("Configuration changed successfully.");
+        return 0;
+      } else {
+        System.err.println("Configuration change unsuccessful: "
+            + response.getEntity(String.class));
+      }
+    } else {
+      System.err.println("Configuration change unsuccessful: null response");
+    }
+    return -1;
+  }
+
+  @VisibleForTesting
+  void addQueues(String args, SchedConfUpdateInfo updateInfo) {
+    if (args == null) {
+      return;
+    }
+    ArrayList<QueueConfigInfo> queueConfigInfos = new ArrayList<>();
+    for (String arg : args.split(";")) {
+      queueConfigInfos.add(getQueueConfigInfo(arg));
+    }
+    updateInfo.setAddQueueInfo(queueConfigInfos);
+  }
+
+  @VisibleForTesting
+  void removeQueues(String args, SchedConfUpdateInfo updateInfo) {
+    if (args == null) {
+      return;
+    }
+    List<String> queuesToRemove = Arrays.asList(args.split(","));
+    updateInfo.setRemoveQueueInfo(new ArrayList<>(queuesToRemove));
+  }
+
+  @VisibleForTesting
+  void updateQueues(String args, SchedConfUpdateInfo updateInfo) {
+    if (args == null) {
+      return;
+    }
+    ArrayList<QueueConfigInfo> queueConfigInfos = new ArrayList<>();
+    for (String arg : args.split(";")) {
+      queueConfigInfos.add(getQueueConfigInfo(arg));
+    }
+    updateInfo.setUpdateQueueInfo(queueConfigInfos);
+  }
+
+  @VisibleForTesting
+  void globalUpdates(String args, SchedConfUpdateInfo updateInfo) {
+    if (args == null) {
+      return;
+    }
+    HashMap<String, String> globalUpdates = new HashMap<>();
+    for (String globalUpdate : args.split(",")) {
+      putKeyValuePair(globalUpdates, globalUpdate);
+    }
+    updateInfo.setGlobalParams(globalUpdates);
+  }
+
+  private QueueConfigInfo getQueueConfigInfo(String arg) {
+    String[] queueArgs = arg.split(",");
+    String queuePath = queueArgs[0];
+    Map<String, String> queueConfigs = new HashMap<>();
+    for (int i = 1; i < queueArgs.length; ++i) {
+      putKeyValuePair(queueConfigs, queueArgs[i]);
+    }
+    return new QueueConfigInfo(queuePath, queueConfigs);
+  }
+
+  private void putKeyValuePair(Map<String, String> kv, String args) {
+    String[] argParts = args.split("=");
+    if (argParts.length == 1) {
+      if (argParts[0].isEmpty() || !args.contains("=")) {
+        throw new IllegalArgumentException(CONF_ERR_MSG);
+      } else {
+        // key specified, but no value e.g. "confKey="
+        kv.put(argParts[0], null);
+      }
+    } else if (argParts.length > 2) {
+      throw new IllegalArgumentException(CONF_ERR_MSG);
+    } else {
+      if (argParts[0].isEmpty()) {
+        throw new IllegalArgumentException(CONF_ERR_MSG);
+      }
+      kv.put(argParts[0], argParts[1]);
+    }
+  }
+
+  private void printUsage() {
+    System.out.println("yarn schedconf [-add queueAddPath1,confKey1=confVal1,"
+        + "confKey2=confVal2;queueAddPath2,confKey3=confVal3] "
+        + "[-remove queueRemovePath1,queueRemovePath2] "
+        + "[-update queueUpdatePath1,confKey1=confVal1] "
+        + "[-global globalConfKey1=globalConfVal1,"
+        + "globalConfKey2=globalConfVal2]");
+  }
+}
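
A usage sketch for the new subcommand, following the format printed by printUsage() above. The queue paths and configuration keys here are illustrative only, and the semicolon that separates queues must be quoted or escaped in most shells:

    yarn schedconf -add "root.a,capacity=10;root.b,capacity=20"
    yarn schedconf -update "root.a,maximum-capacity=75"
    yarn schedconf -remove root.a,root.b
    yarn schedconf -global schedKey1=schedVal1,schedKey2=schedVal2

Each option is parsed into a SchedConfUpdateInfo and PUT as JSON to the ResourceManager's ws/v1/cluster/sched-conf endpoint, per the run() method above.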

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
new file mode 100644
index 0000000..d2f0639
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
@@ -0,0 +1,160 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client.cli;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Class for testing {@link SchedConfCLI}.
+ */
+public class TestSchedConfCLI {
+
+  private ByteArrayOutputStream sysOutStream;
+  private PrintStream sysOut;
+
+  private ByteArrayOutputStream sysErrStream;
+  private PrintStream sysErr;
+
+  private SchedConfCLI cli;
+
+  @Before
+  public void setUp() {
+    sysOutStream = new ByteArrayOutputStream();
+    sysOut =  new PrintStream(sysOutStream);
+    System.setOut(sysOut);
+
+    sysErrStream = new ByteArrayOutputStream();
+    sysErr = new PrintStream(sysErrStream);
+    System.setErr(sysErr);
+
+    cli = new SchedConfCLI();
+  }
+
+  @Test(timeout = 10000)
+  public void testInvalidConf() throws Exception {
+    // conf pair with no key should be invalid
+    int exitCode = cli.run(new String[] {"-add", "root.a,=confVal"});
+    assertTrue("Should return an error code", exitCode != 0);
+    assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+        "value as confKey=confVal."));
+    exitCode = cli.run(new String[] {"-update", "root.a,=confVal"});
+    assertTrue("Should return an error code", exitCode != 0);
+    assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+        "value as confKey=confVal."));
+
+    exitCode = cli.run(new String[] {"-add", "root.a,confKey=confVal=conf"});
+    assertTrue("Should return an error code", exitCode != 0);
+    assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+        "value as confKey=confVal."));
+    exitCode = cli.run(new String[] {"-update", "root.a,confKey=confVal=c"});
+    assertTrue("Should return an error code", exitCode != 0);
+    assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+        "value as confKey=confVal."));
+  }
+
+  @Test(timeout = 10000)
+  public void testAddQueues() {
+    SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+    cli.addQueues("root.a,a1=aVal1,a2=aVal2," +
+        "a3=", schedUpdateInfo);
+    QueueConfigInfo addInfo = schedUpdateInfo.getAddQueueInfo().get(0);
+    assertEquals("root.a", addInfo.getQueue());
+    Map<String, String> params = addInfo.getParams();
+    assertEquals(3, params.size());
+    assertEquals("aVal1", params.get("a1"));
+    assertEquals("aVal2", params.get("a2"));
+    assertNull(params.get("a3"));
+
+    schedUpdateInfo = new SchedConfUpdateInfo();
+    cli.addQueues("root.b,b1=bVal1;root.c,c1=cVal1", schedUpdateInfo);
+    assertEquals(2, schedUpdateInfo.getAddQueueInfo().size());
+    QueueConfigInfo bAddInfo = schedUpdateInfo.getAddQueueInfo().get(0);
+    assertEquals("root.b", bAddInfo.getQueue());
+    Map<String, String> bParams = bAddInfo.getParams();
+    assertEquals(1, bParams.size());
+    assertEquals("bVal1", bParams.get("b1"));
+    QueueConfigInfo cAddInfo = schedUpdateInfo.getAddQueueInfo().get(1);
+    assertEquals("root.c", cAddInfo.getQueue());
+    Map<String, String> cParams = cAddInfo.getParams();
+    assertEquals(1, cParams.size());
+    assertEquals("cVal1", cParams.get("c1"));
+  }
+
+  @Test(timeout = 10000)
+  public void testRemoveQueues() {
+    SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+    cli.removeQueues("root.a,root.b,root.c.c1", schedUpdateInfo);
+    List<String> removeInfo = schedUpdateInfo.getRemoveQueueInfo();
+    assertEquals(3, removeInfo.size());
+    assertEquals("root.a", removeInfo.get(0));
+    assertEquals("root.b", removeInfo.get(1));
+    assertEquals("root.c.c1", removeInfo.get(2));
+  }
+
+  @Test(timeout = 10000)
+  public void testUpdateQueues() {
+    SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+    cli.updateQueues("root.a,a1=aVal1,a2=aVal2," +
+        "a3=", schedUpdateInfo);
+    QueueConfigInfo updateInfo = schedUpdateInfo.getUpdateQueueInfo().get(0);
+    assertEquals("root.a", updateInfo.getQueue());
+    Map<String, String> params = updateInfo.getParams();
+    assertEquals(3, params.size());
+    assertEquals("aVal1", params.get("a1"));
+    assertEquals("aVal2", params.get("a2"));
+    assertNull(params.get("a3"));
+
+    schedUpdateInfo = new SchedConfUpdateInfo();
+    cli.updateQueues("root.b,b1=bVal1;root.c,c1=cVal1", schedUpdateInfo);
+    assertEquals(2, schedUpdateInfo.getUpdateQueueInfo().size());
+    QueueConfigInfo bUpdateInfo = schedUpdateInfo.getUpdateQueueInfo().get(0);
+    assertEquals("root.b", bUpdateInfo.getQueue());
+    Map<String, String> bParams = bUpdateInfo.getParams();
+    assertEquals(1, bParams.size());
+    assertEquals("bVal1", bParams.get("b1"));
+    QueueConfigInfo cUpdateInfo = schedUpdateInfo.getUpdateQueueInfo().get(1);
+    assertEquals("root.c", cUpdateInfo.getQueue());
+    Map<String, String> cParams = cUpdateInfo.getParams();
+    assertEquals(1, cParams.size());
+    assertEquals("cVal1", cParams.get("c1"));
+  }
+
+  @Test(timeout = 10000)
+  public void testGlobalUpdate() {
+    SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+    cli.globalUpdates("schedKey1=schedVal1,schedKey2=schedVal2",
+        schedUpdateInfo);
+    Map<String, String> globalInfo = schedUpdateInfo.getGlobalParams();
+    assertEquals(2, globalInfo.size());
+    assertEquals("schedVal1", globalInfo.get("schedKey1"));
+    assertEquals("schedVal2", globalInfo.get("schedKey2"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java
new file mode 100644
index 0000000..aec6762
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Data structures for scheduler configuration mutation info.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.webapp.dao;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
index 4167e21..1cf1e97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
@@ -23,9 +23,14 @@ import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import javax.ws.rs.core.MediaType;
+
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
 import org.apache.hadoop.conf.Configuration;
 import org.codehaus.jettison.json.JSONObject;
 
+import java.io.StringWriter;
+
 /**
  * This class contains several utility function which could be used to generate
  * Restful calls to RM/NM/AHS.
@@ -59,4 +64,13 @@ public final class YarnWebServiceUtils {
         .get(ClientResponse.class);
     return response.getEntity(JSONObject.class);
   }
+
+  @SuppressWarnings("rawtypes")
+  public static String toJson(Object nsli, Class klass) throws Exception {
+    StringWriter sw = new StringWriter();
+    JSONJAXBContext ctx = new JSONJAXBContext(klass);
+    JSONMarshaller jm = ctx.createJSONMarshaller();
+    jm.marshallToJSON(nsli, sw);
+    return sw.toString();
+  }
 }
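
For context, a minimal sketch (not part of the patch) of how the new toJson helper is exercised by SchedConfCLI above; it relies on Jersey 1.x's JSONJAXBContext to marshal a JAXB-annotated bean into a JSON string:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
    import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;

    // Build an update request and serialize it the way the CLI does.
    // toJson is declared to throw Exception, so callers must handle it.
    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
    Map<String, String> global = new HashMap<>();
    global.put("schedKey1", "schedVal1");   // illustrative key/value, as in the tests
    updateInfo.setGlobalParams(global);
    String json = YarnWebServiceUtils.toJson(updateInfo, SchedConfUpdateInfo.class);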

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
index 3a388fe..5bc5874 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 /**
  * Interface for determining whether configuration mutations are allowed.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
index 6648668..1de6f6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 /**
  * Default configuration mutation ACL policy. Checks if user is YARN admin.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
index 027d944..007dc29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 6b8306c..86be7c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 6f637a9..8a54013 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -137,11 +137,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index eb97260..670c0f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
index 0a82d50..ee53fd1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 import java.util.HashSet;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index ae1ebad..798b93f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -192,6 +192,7 @@ import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.inject.Inject;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
index b20eda6..d1d91c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+package org.apache.hadoop.yarn.webapp.dao;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -54,4 +54,4 @@ public class QueueConfigInfo {
     return this.params;
   }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
index b7c585e..bb84096 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+package org.apache.hadoop.yarn.webapp.dao;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -54,16 +54,32 @@ public class SchedConfUpdateInfo {
     return addQueueInfo;
   }
 
+  public void setAddQueueInfo(ArrayList<QueueConfigInfo> addQueueInfo) {
+    this.addQueueInfo = addQueueInfo;
+  }
+
   public ArrayList<String> getRemoveQueueInfo() {
     return removeQueueInfo;
   }
 
+  public void setRemoveQueueInfo(ArrayList<String> removeQueueInfo) {
+    this.removeQueueInfo = removeQueueInfo;
+  }
+
   public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
     return updateQueueInfo;
   }
 
+  public void setUpdateQueueInfo(ArrayList<QueueConfigInfo> updateQueueInfo) {
+    this.updateQueueInfo = updateQueueInfo;
+  }
+
   @XmlElementWrapper(name = "global-updates")
   public HashMap<String, String> getGlobalParams() {
     return global;
   }
+
+  public void setGlobalParams(HashMap<String, String> globalInfo) {
+    this.global = globalInfo;
+  }
 }
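
With setters alongside the existing getters, SchedConfUpdateInfo becomes a full read-write bean, which lets JAXB/Jersey unmarshal incoming request bodies as well as marshal responses. A hedged construction sketch (the no-arg constructor is assumed, as JAXB requires; the queue path root.b is hypothetical, while the global key is a real CapacityScheduler property):

    import java.util.ArrayList;
    import java.util.HashMap;
    import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;

    public class SchedConfUpdateDemo {
      public static void main(String[] args) {
        SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();

        // Cluster-wide scheduler settings go into the global-updates map.
        HashMap<String, String> global = new HashMap<>();
        global.put("yarn.scheduler.capacity.maximum-applications", "5000");
        updateInfo.setGlobalParams(global);

        // Queues to delete are addressed by their full path
        // (root.b is a hypothetical queue used for illustration).
        ArrayList<String> remove = new ArrayList<>();
        remove.add("root.b");
        updateInfo.setRemoveQueueInfo(remove);
      }
    }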

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
index 0f5a3d8..398e909 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -25,8 +25,8 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 3216781..9104f16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -23,8 +23,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7915ee3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 5fbe36f..26ef1b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -22,8 +22,6 @@ import com.google.inject.Guice;
 import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.json.JSONJAXBContext;
-import com.sun.jersey.api.json.JSONMarshaller;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 import org.apache.hadoop.conf.Configuration;
@@ -35,11 +33,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -50,7 +49,6 @@ import javax.ws.rs.core.Response.Status;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.StringWriter;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -183,8 +181,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -218,8 +216,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -244,8 +242,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -269,8 +267,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -300,8 +298,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -332,8 +330,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -360,8 +358,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf =
@@ -395,8 +393,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
@@ -413,8 +411,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     newCSConf = cs.getConfiguration();
@@ -443,8 +441,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf =
@@ -468,8 +466,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf =
@@ -483,8 +481,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         r.path("ws").path("v1").path("cluster")
             .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
-                MediaType.APPLICATION_JSON)
+            .entity(YarnWebServiceUtils.toJson(updateInfo,
+                SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     newCSConf =
@@ -506,13 +504,4 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     }
     super.tearDown();
   }
-
-  @SuppressWarnings("rawtypes")
-  private String toJson(Object nsli, Class klass) throws Exception {
-    StringWriter sw = new StringWriter();
-    JSONJAXBContext ctx = new JSONJAXBContext(klass);
-    JSONMarshaller jm = ctx.createJSONMarshaller();
-    jm.marshallToJSON(nsli, sw);
-    return sw.toString();
-  }
 }




[10/50] [abbrv] hadoop git commit: HDFS-12171. Reduce IIP object allocations for inode lookup. Contributed by Daryn Sharp.

Posted by xg...@apache.org.
HDFS-12171. Reduce IIP object allocations for inode lookup. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a68b5b31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a68b5b31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a68b5b31

Branch: refs/heads/YARN-5734
Commit: a68b5b31cf846c0fc94c430bafd07a9bca369234
Parents: 6d983cc
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue Jul 25 11:03:09 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue Jul 25 11:03:49 2017 -0500

----------------------------------------------------------------------
 .../server/namenode/EncryptionZoneManager.java   |  5 ++---
 .../server/namenode/FSDirErasureCodingOp.java    |  5 ++---
 .../hdfs/server/namenode/INodesInPath.java       | 19 ++-----------------
 .../server/namenode/TestSnapshotPathINodes.java  |  5 ++---
 4 files changed, 8 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68b5b31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 6dff62b..96e189b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -209,9 +209,8 @@ public class EncryptionZoneManager {
     if (!hasCreatedEncryptionZone()) {
       return null;
     }
-    List<INode> inodes = iip.getReadOnlyINodes();
-    for (int i = inodes.size() - 1; i >= 0; i--) {
-      final INode inode = inodes.get(i);
+    for (int i = iip.length() - 1; i >= 0; i--) {
+      final INode inode = iip.getINode(i);
       if (inode != null) {
         final EncryptionZoneInt ezi = encryptionZones.get(inode.getId());
         if (ezi != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68b5b31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 681f217..486503c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -355,9 +355,8 @@ final class FSDirErasureCodingOp {
     Preconditions.checkNotNull(iip, "INodes cannot be null");
     fsd.readLock();
     try {
-      List<INode> inodes = iip.getReadOnlyINodes();
-      for (int i = inodes.size() - 1; i >= 0; i--) {
-        final INode inode = inodes.get(i);
+      for (int i = iip.length() - 1; i >= 0; i--) {
+        final INode inode = iip.getINode(i);
         if (inode == null) {
           continue;
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68b5b31/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index abc8b63..8235bf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -18,9 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.NoSuchElementException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -336,17 +333,9 @@ public class INodesInPath {
    *         otherwise, i < 0, return the (length + i)-th inode.
    */
   public INode getINode(int i) {
-    if (inodes == null || inodes.length == 0) {
-      throw new NoSuchElementException("inodes is null or empty");
-    }
-    int index = i >= 0 ? i : inodes.length + i;
-    if (index < inodes.length && index >= 0) {
-      return inodes[index];
-    } else {
-      throw new NoSuchElementException("inodes.length == " + inodes.length);
-    }
+    return inodes[(i < 0) ? inodes.length + i : i];
   }
-  
+
   /** @return the last inode. */
   public INode getLastINode() {
     return getINode(-1);
@@ -384,10 +373,6 @@ public class INodesInPath {
     return inodes.length;
   }
 
-  public List<INode> getReadOnlyINodes() {
-    return Collections.unmodifiableList(Arrays.asList(inodes));
-  }
-
   public INode[] getINodesArray() {
     INode[] retArr = new INode[inodes.length];
     System.arraycopy(inodes, 0, retArr, 0, inodes.length);
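
The removed getReadOnlyINodes() wrapped the internal array in a fresh unmodifiable List on every call; callers now index the path directly, and getINode() accepts a negative index counting back from the end (so getINode(-1) is the last inode). Note that the explicit bounds checks were also dropped, so an out-of-range index now surfaces as an ArrayIndexOutOfBoundsException rather than a NoSuchElementException. A sketch of the new caller idiom, mirroring the EncryptionZoneManager and FSDirErasureCodingOp changes above (fragment only; iip is an INodesInPath):

    // Walk the resolved path from the deepest component upward without
    // allocating an intermediate List.
    for (int i = iip.length() - 1; i >= 0; i--) {
      final INode inode = iip.getINode(i);
      if (inode == null) {
        continue;  // components may be unresolved (e.g. not yet created)
      }
      // ... inspect inode ...
    }
    INode last = iip.getINode(-1);  // the (length - 1)-th, i.e. last, inode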

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68b5b31/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index d1d915e..b62a418 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -310,9 +310,8 @@ public class TestSnapshotPathINodes {
   }
 
   private int getNumNonNull(INodesInPath iip) {
-    List<INode> inodes = iip.getReadOnlyINodes();
-    for (int i = inodes.size() - 1; i >= 0; i--) {
-      if (inodes.get(i) != null) {
+    for (int i = iip.length() - 1; i >= 0; i--) {
+      if (iip.getINode(i) != null) {
         return i+1;
       }
     }




[17/50] [abbrv] hadoop git commit: HDFS-11896. Non-dfsUsed will be doubled on dead node re-registration. Contributed by Brahma Reddy Battula.

Posted by xg...@apache.org.
HDFS-11896. Non-dfsUsed will be doubled on dead node re-registration. Contributed by Brahma Reddy Battula.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4a85c69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4a85c69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4a85c69

Branch: refs/heads/YARN-5734
Commit: c4a85c694fae3f814ab4e7f3c172da1df0e0e353
Parents: 11ece0b
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Thu Jul 27 12:02:57 2017 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Thu Jul 27 12:02:57 2017 -0700

----------------------------------------------------------------------
 .../blockmanagement/DatanodeDescriptor.java     | 19 ++++---
 .../hadoop/hdfs/server/datanode/DataNode.java   |  2 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  | 53 ++++++++++++++++++++
 3 files changed, 65 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4a85c69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 57348a3..2bd4a20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -331,11 +331,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   public void resetBlocks() {
-    setCapacity(0);
-    setRemaining(0);
-    setBlockPoolUsed(0);
-    setDfsUsed(0);
-    setXceiverCount(0);
+    updateStorageStats(this.getStorageReports(), 0L, 0L, 0, 0, null);
     this.invalidateBlocks.clear();
     this.volumeFailures = 0;
     // pendingCached, cached, and pendingUncached are protected by the
@@ -384,6 +380,16 @@ public class DatanodeDescriptor extends DatanodeInfo {
   public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
       long cacheUsed, int xceiverCount, int volFailures,
       VolumeFailureSummary volumeFailureSummary) {
+    updateStorageStats(reports, cacheCapacity, cacheUsed, xceiverCount,
+        volFailures, volumeFailureSummary);
+    setLastUpdate(Time.now());
+    setLastUpdateMonotonic(Time.monotonicNow());
+    rollBlocksScheduled(getLastUpdateMonotonic());
+  }
+
+  private void updateStorageStats(StorageReport[] reports, long cacheCapacity,
+      long cacheUsed, int xceiverCount, int volFailures,
+      VolumeFailureSummary volumeFailureSummary) {
     long totalCapacity = 0;
     long totalRemaining = 0;
     long totalBlockPoolUsed = 0;
@@ -434,8 +440,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
     setCacheCapacity(cacheCapacity);
     setCacheUsed(cacheUsed);
     setXceiverCount(xceiverCount);
-    setLastUpdate(Time.now());
-    setLastUpdateMonotonic(Time.monotonicNow());
     this.volumeFailures = volFailures;
     this.volumeFailureSummary = volumeFailureSummary;
     for (StorageReport report : reports) {
@@ -451,7 +455,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
       totalDfsUsed += report.getDfsUsed();
       totalNonDfsUsed += report.getNonDfsUsed();
     }
-    rollBlocksScheduled(getLastUpdateMonotonic());
 
     // Update total metrics for the node.
     setCapacity(totalCapacity);
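
The old resetBlocks() zeroed capacity, remaining, block-pool used, DFS used and the xceiver count, but never the node's non-DFS usage, so a dead node that re-registered could have its stale non-DFS value counted alongside the freshly reported one. Routing resetBlocks() through the extracted updateStorageStats() recomputes every total from the storage reports instead. Illustrative arithmetic only (not Hadoop code):

    public class NonDfsDoubleCountDemo {
      public static void main(String[] args) {
        long staleNonDfsUsed = 500L;  // left behind when the node died
        long reportedOnReReg = 500L;  // same usage reported after re-registration
        // Before the fix: the stale value was never cleared, so cluster
        // totals could count the usage twice.
        long buggyTotal = staleNonDfsUsed + reportedOnReReg;  // 1000 -> doubled
        // After the fix: totals are rebuilt from the reports alone.
        long fixedTotal = reportedOnReReg;                    // 500
        System.out.println(buggyTotal + " vs " + fixedTotal);
      }
    }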

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4a85c69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 35fbb9c..2730393 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1319,7 +1319,7 @@ public class DataNode extends ReconfigurableBase
 
   // used only for testing
   @VisibleForTesting
-  void setHeartbeatsDisabledForTests(
+  public void setHeartbeatsDisabledForTests(
       boolean heartbeatsDisabledForTests) {
     this.heartbeatsDisabledForTests = heartbeatsDisabledForTests;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4a85c69/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 6df8fcf..74be90c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -17,9 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.protocol.BlockType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -52,6 +55,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
 
@@ -178,4 +182,53 @@ public class TestDeadDatanode {
           .getDatanodeDescriptor().equals(clientNode));
     }
   }
+
+  @Test
+  public void testNonDFSUsedONDeadNodeReReg() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+        6 * 1000);
+    long CAPACITY = 5000L;
+    long[] capacities = new long[] { 4 * CAPACITY, 4 * CAPACITY };
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+          .simulatedCapacities(capacities).build();
+      long initialCapacity = cluster.getNamesystem(0).getCapacityTotal();
+      assertTrue(initialCapacity > 0);
+      DataNode dn1 = cluster.getDataNodes().get(0);
+      DataNode dn2 = cluster.getDataNodes().get(1);
+      final DatanodeDescriptor dn2Desc = cluster.getNamesystem(0)
+          .getBlockManager().getDatanodeManager()
+          .getDatanode(dn2.getDatanodeId());
+      dn1.setHeartbeatsDisabledForTests(true);
+      cluster.setDataNodeDead(dn1.getDatanodeId());
+      assertEquals("Capacity shouldn't include DeadNode", dn2Desc.getCapacity(),
+          cluster.getNamesystem(0).getCapacityTotal());
+      assertEquals("NonDFS-used shouldn't include DeadNode",
+          dn2Desc.getNonDfsUsed(),
+          cluster.getNamesystem(0).getNonDfsUsedSpace());
+      // Wait for re-registration and heartbeat
+      dn1.setHeartbeatsDisabledForTests(false);
+      final DatanodeDescriptor dn1Desc = cluster.getNamesystem(0)
+          .getBlockManager().getDatanodeManager()
+          .getDatanode(dn1.getDatanodeId());
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+
+        @Override public Boolean get() {
+          return dn1Desc.isAlive() && dn1Desc.isHeartbeatedSinceRegistration();
+        }
+      }, 100, 5000);
+      assertEquals("Capacity should be 0 after all DNs dead", initialCapacity,
+          cluster.getNamesystem(0).getCapacityTotal());
+      long nonDfsAfterReg = cluster.getNamesystem(0).getNonDfsUsedSpace();
+      assertEquals("NonDFS should include actual DN NonDFSUsed",
+          dn1Desc.getNonDfsUsed() + dn2Desc.getNonDfsUsed(), nonDfsAfterReg);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }




[12/50] [abbrv] hadoop git commit: YARN-6307. Refactor FairShareComparator#compare (Contributed by Yufei Gu via Daniel Templeton)

Posted by xg...@apache.org.
YARN-6307. Refactor FairShareComparator#compare (Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f81a4efb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f81a4efb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f81a4efb

Branch: refs/heads/YARN-5734
Commit: f81a4efb8c40f99a9a6b7b42d3b6eeedf43eb27a
Parents: ac9489f
Author: Daniel Templeton <te...@apache.org>
Authored: Tue Jul 25 13:00:31 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Tue Jul 25 13:00:31 2017 -0700

----------------------------------------------------------------------
 .../fair/policies/FairSharePolicy.java          | 125 ++++++++++++-------
 1 file changed, 81 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f81a4efb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index 2a852aa..0ef90a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -56,24 +56,28 @@ public class FairSharePolicy extends SchedulingPolicy {
   }
 
   /**
-   * Compare Schedulables via weighted fair sharing. In addition, Schedulables
-   * below their min share get priority over those whose min share is met.
+   * Compare Schedulables mainly by fair share usage to achieve fairness.
+   * Specifically, it goes through the following four steps.
    *
-   * Schedulables without resource demand get lower priority than
-   * ones who have demands.
+   * 1. Compare demands. Schedulables without resource demand get lower
+   * priority than ones that have demands.
    * 
-   * Schedulables below their min share are compared by how far below it they
-   * are as a ratio. For example, if job A has 8 out of a min share of 10 tasks
-   * and job B has 50 out of a min share of 100, then job B is scheduled next,
-   * because B is at 50% of its min share and A is at 80% of its min share.
+   * 2. Compare min share usage. Schedulables below their min share are compared
+   * by how far below it they are as a ratio. For example, if job A has 8 out
+   * of a min share of 10 tasks and job B has 50 out of a min share of 100,
+   * then job B is scheduled next, because B is at 50% of its min share and A
+   * is at 80% of its min share.
    * 
-   * Schedulables above their min share are compared by (runningTasks / weight).
+   * 3. Compare fair share usage. Schedulables above their min share are
+   * compared by their fair share usage, i.e. (resource usage / weight).
    * If all weights are equal, slots are given to the job with the fewest tasks;
    * otherwise, jobs with more weight get proportionally more slots. If weight
    * equals to 0, we can't compare Schedulables by (resource usage/weight).
    * There are two situations: 1)All weights equal to 0, slots are given
    * to one with less resource usage. 2)Only one of weight equals to 0, slots
    * are given to the one with non-zero weight.
+   *
+   * 4. Break the tie by comparing submit time and job name.
    */
   private static class FairShareComparator implements Comparator<Schedulable>,
       Serializable {
@@ -82,37 +86,88 @@ public class FairSharePolicy extends SchedulingPolicy {
 
     @Override
     public int compare(Schedulable s1, Schedulable s2) {
+      int res = compareDemand(s1, s2);
+
+      // Pre-compute resource usages to avoid duplicate calculation
+      Resource resourceUsage1 = s1.getResourceUsage();
+      Resource resourceUsage2 = s2.getResourceUsage();
+
+      if (res == 0) {
+        res = compareMinShareUsage(s1, s2, resourceUsage1, resourceUsage2);
+      }
+
+      if (res == 0) {
+        res = compareFairShareUsage(s1, s2, resourceUsage1, resourceUsage2);
+      }
+
+      // Break the tie by submit time
+      if (res == 0) {
+        res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
+      }
+
+      // Break the tie by job name
+      if (res == 0) {
+        res = s1.getName().compareTo(s2.getName());
+      }
+
+      return res;
+    }
+
+    private int compareDemand(Schedulable s1, Schedulable s2) {
+      int res = 0;
       Resource demand1 = s1.getDemand();
       Resource demand2 = s2.getDemand();
       if (demand1.equals(Resources.none()) && Resources.greaterThan(
           RESOURCE_CALCULATOR, null, demand2, Resources.none())) {
-        return 1;
+        res = 1;
       } else if (demand2.equals(Resources.none()) && Resources.greaterThan(
           RESOURCE_CALCULATOR, null, demand1, Resources.none())) {
-        return -1;
+        res = -1;
       }
+      return res;
+    }
 
-      double minShareRatio1, minShareRatio2;
-      double useToWeightRatio1, useToWeightRatio2;
-      double weight1, weight2;
-      //Do not repeat the getResourceUsage calculation
-      Resource resourceUsage1 = s1.getResourceUsage();
-      Resource resourceUsage2 = s2.getResourceUsage();
+    private int compareMinShareUsage(Schedulable s1, Schedulable s2,
+        Resource resourceUsage1, Resource resourceUsage2) {
+      int res;
       Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
-          s1.getMinShare(), demand1);
+          s1.getMinShare(), s1.getDemand());
       Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
-          s2.getMinShare(), demand2);
+          s2.getMinShare(), s2.getDemand());
       boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
           resourceUsage1, minShare1);
       boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
           resourceUsage2, minShare2);
-      minShareRatio1 = (double) resourceUsage1.getMemorySize()
-          / Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE).getMemorySize();
-      minShareRatio2 = (double) resourceUsage2.getMemorySize()
-          / Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE).getMemorySize();
 
-      weight1 = s1.getWeights().getWeight(ResourceType.MEMORY);
-      weight2 = s2.getWeights().getWeight(ResourceType.MEMORY);
+      if (s1Needy && !s2Needy) {
+        res = -1;
+      } else if (s2Needy && !s1Needy) {
+        res = 1;
+      } else if (s1Needy && s2Needy) {
+        double minShareRatio1 = (double) resourceUsage1.getMemorySize() /
+            Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE)
+                .getMemorySize();
+        double minShareRatio2 = (double) resourceUsage2.getMemorySize() /
+            Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE)
+                .getMemorySize();
+        res = (int) Math.signum(minShareRatio1 - minShareRatio2);
+      } else {
+        res = 0;
+      }
+
+      return res;
+    }
+
+    /**
+     * To simplify computation, use weights instead of fair shares to calculate
+     * fair share usage.
+     */
+    private int compareFairShareUsage(Schedulable s1, Schedulable s2,
+        Resource resourceUsage1, Resource resourceUsage2) {
+      double weight1 = s1.getWeights().getWeight(ResourceType.MEMORY);
+      double weight2 = s2.getWeights().getWeight(ResourceType.MEMORY);
+      double useToWeightRatio1;
+      double useToWeightRatio2;
       if (weight1 > 0.0 && weight2 > 0.0) {
         useToWeightRatio1 = resourceUsage1.getMemorySize() / weight1;
         useToWeightRatio2 = resourceUsage2.getMemorySize() / weight2;
@@ -130,25 +185,7 @@ public class FairSharePolicy extends SchedulingPolicy {
         }
       }
 
-      int res = 0;
-      if (s1Needy && !s2Needy)
-        res = -1;
-      else if (s2Needy && !s1Needy)
-        res = 1;
-      else if (s1Needy && s2Needy)
-        res = (int) Math.signum(minShareRatio1 - minShareRatio2);
-      else
-        // Neither schedulable is needy
-        res = (int) Math.signum(useToWeightRatio1 - useToWeightRatio2);
-      if (res == 0) {
-        // Apps are tied in fairness ratio. Break the tie by submit time and job
-        // name to get a deterministic ordering, which is useful for unit tests.
-        res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
-        if (res == 0) {
-          res = s1.getName().compareTo(s2.getName());
-        }
-      }
-      return res;
+      return (int) Math.signum(useToWeightRatio1 - useToWeightRatio2);
     }
   }
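
For context, the refactored comparator now works in two stages: schedulables
below their min share sort first (by usage over min share), and everything
else sorts by memory-usage-to-weight ratio. A minimal standalone sketch of
that second stage (illustrative names; positive weights assumed, whereas the
real compareFairShareUsage also handles zero weights):

public class UseToWeightRatioDemo {

  // Mirrors compareFairShareUsage: the schedulable with the lower
  // memory-usage-to-weight ratio compares as "less" and is served first.
  static int compareByWeightRatio(long usageMb1, double weight1,
      long usageMb2, double weight2) {
    double ratio1 = usageMb1 / weight1;
    double ratio2 = usageMb2 / weight2;
    return (int) Math.signum(ratio1 - ratio2);
  }

  public static void main(String[] args) {
    // queueA: 4096 MB used at weight 2.0 -> ratio 2048
    // queueB: 3072 MB used at weight 1.0 -> ratio 3072
    // queueA is further below its weighted share, so it sorts first.
    System.out.println(compareByWeightRatio(4096, 2.0, 3072, 1.0)); // -1
  }
}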
 




[19/50] [abbrv] hadoop git commit: HDFS-2319. Add test cases for FSshell -stat. Contributed by XieXianshan and Bharat Viswanadham.

Posted by xg...@apache.org.
HDFS-2319. Add test cases for FSshell -stat. Contributed by XieXianshan and Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3c73002
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3c73002
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3c73002

Branch: refs/heads/YARN-5734
Commit: e3c73002250a21a771689081b51764eca1d862a7
Parents: 5f4808c
Author: Jitendra Pandey <ji...@apache.org>
Authored: Thu Jul 27 13:23:15 2017 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Thu Jul 27 13:23:15 2017 -0700

----------------------------------------------------------------------
 .../src/test/resources/testHDFSConf.xml         | 125 ++++++++++++++++++-
 1 file changed, 124 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3c73002/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 9302507..ba90efa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -7203,7 +7203,130 @@
         </comparator>
       </comparators>
     </test>
-  
+
+    <test> <!-- TESTED -->
+      <description>stat: Test for hdfs:// path - user/group name for directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dirtest</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:///dirtest</command>
+        <command>-fs NAMENODE -stat "%u-%g" hdfs:///dirtest</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r hdfs:///dirtest</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>stat: Test for hdfs:// path - user/group name for file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:////data60bytes</command>
+        <command>-fs NAMENODE -stat "%u-%g" hdfs:////data60bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r hdfs:///data60bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>stat: Test for hdfs:// path - user/group name for multiple files</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///data30bytes</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -chown hdfs:hdfs hdfs:///data30bytes</command>
+        <command>-fs NAMENODE -stat "%u-%g" hdfs:///data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -rm -r hdfs:////data30bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hdfs-hdfs</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>stat: Test for Namenode's path - user/group name for directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/</command>
+        <command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+    <description>stat: Test for Namenode's path - user/group name for file </description>
+    <test-commands>
+    <command>-fs NAMENODE -mkdir /dir0</command>
+    <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+    <command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/data15bytes</command>
+    <command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/data15bytes</command>
+    </test-commands>
+    <cleanup-commands>
+      <command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
+    </cleanup-commands>
+    <comparators>
+      <comparator>
+        <type>TokenComparator</type>
+        <expected-output>hadoop-hadoopgrp</expected-output>
+      </comparator>
+    </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>stat: Test for Namenode's path - user/group name for multiple files </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -chown hdfs:hdfs NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hadoop-hadoopgrp</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>hdfs-hdfs</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
     <!-- Tests for tail -->
     <test> <!-- TESTED -->
       <description>tail: contents of file(absolute path)</description>
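
All of the new tests above assert on the "%u-%g" stat format, i.e. owner and
group joined by a dash. The same fields are available programmatically from
FileStatus; a minimal sketch (path invented, assumes fs.defaultFS points at
the cluster under test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StatOwnerGroupDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus st = fs.getFileStatus(new Path("/dirtest"));
    // Prints the same "owner-group" token the -stat "%u-%g" tests expect,
    // e.g. "hadoop-hadoopgrp".
    System.out.println(st.getOwner() + "-" + st.getGroup());
  }
}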




[39/50] [abbrv] hadoop git commit: YARN-6870. Fix floating point inaccuracies in resource availability check in AllocationBasedResourceUtilizationTracker. (Brook Zhou via asuresh)

Posted by xg...@apache.org.
YARN-6870. Fix floating point inaccuracies in resource availability check in AllocationBasedResourceUtilizationTracker. (Brook Zhou via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/890e14c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/890e14c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/890e14c0

Branch: refs/heads/YARN-5734
Commit: 890e14c02a612c772cecd5dff2411060efd418a3
Parents: bcde66b
Author: Arun Suresh <as...@apache.org>
Authored: Fri Jul 28 16:32:43 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Jul 28 16:32:43 2017 -0700

----------------------------------------------------------------------
 ...locationBasedResourceUtilizationTracker.java | 31 +++++--
 .../scheduler/ContainerScheduler.java           |  5 +-
 ...locationBasedResourceUtilizationTracker.java | 93 ++++++++++++++++++++
 3 files changed, 123 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/890e14c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
index 9839aeb..6e2b617 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
@@ -118,19 +118,40 @@ public class AllocationBasedResourceUtilizationTracker implements
       return false;
     }
 
-    float vCores = (float) cpuVcores /
-        getContainersMonitor().getVCoresAllocatedForContainers();
     if (LOG.isDebugEnabled()) {
       LOG.debug("before cpuCheck [asked={} > allowed={}]",
-          this.containersAllocation.getCPU(), vCores);
+          this.containersAllocation.getCPU(),
+          getContainersMonitor().getVCoresAllocatedForContainers());
     }
-    // Check CPU.
-    if (this.containersAllocation.getCPU() + vCores > 1.0f) {
+    // Check CPU. Compare using integral values of cores to avoid decimal
+    // inaccuracies.
+    if (!hasEnoughCpu(this.containersAllocation.getCPU(),
+        getContainersMonitor().getVCoresAllocatedForContainers(), cpuVcores)) {
       return false;
     }
     return true;
   }
 
+  /**
+   * Returns whether there is enough space for coresRequested in totalCores.
+   * Converts currentAllocation usage to nearest integer count before comparing,
+   * as floats are inherently imprecise. NOTE: this calculation assumes that
+   * requested core counts must be integers, and currentAllocation core count
+   * must also be an integer.
+   *
+   * @param currentAllocation The current allocation, a float value from 0 to 1.
+   * @param totalCores The total cores in the system.
+   * @param coresRequested The number of cores requested.
+   * @return True if currentAllocation * totalCores + coresRequested &lt;=
+   *         totalCores.
+   */
+  public boolean hasEnoughCpu(float currentAllocation, long totalCores,
+      int coresRequested) {
+    // Must not cast here, as it would truncate the decimal digits.
+    return Math.round(currentAllocation * totalCores)
+        + coresRequested <= totalCores;
+  }
+
   public ContainersMonitor getContainersMonitor() {
     return this.scheduler.getContainersMonitor();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/890e14c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 19243ac..c119bf2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -392,7 +392,10 @@ public class ContainerScheduler extends AbstractService implements
       ResourceUtilization resourcesToFreeUp) {
     return resourcesToFreeUp.getPhysicalMemory() <= 0 &&
         resourcesToFreeUp.getVirtualMemory() <= 0 &&
-        resourcesToFreeUp.getCPU() <= 0.0f;
+        // Convert the number of cores to nearest integral number, due to
+        // imprecision of direct float comparison.
+        Math.round(resourcesToFreeUp.getCPU()
+            * getContainersMonitor().getVCoresAllocatedForContainers()) <= 0;
   }
 
   private ResourceUtilization resourcesToFreeUp(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/890e14c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestAllocationBasedResourceUtilizationTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestAllocationBasedResourceUtilizationTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestAllocationBasedResourceUtilizationTracker.java
new file mode 100644
index 0000000..82c2147
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestAllocationBasedResourceUtilizationTracker.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests for the {@link AllocationBasedResourceUtilizationTracker} class.
+ */
+public class TestAllocationBasedResourceUtilizationTracker {
+
+  private ContainerScheduler mockContainerScheduler;
+
+  @Before
+  public void setup() {
+    mockContainerScheduler = mock(ContainerScheduler.class);
+    ContainersMonitor containersMonitor =
+        new ContainersMonitorImpl(mock(ContainerExecutor.class),
+            mock(AsyncDispatcher.class), mock(Context.class));
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setInt(YarnConfiguration.NM_PMEM_MB, 1024);
+    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
+    conf.setFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, 2.0f);
+    conf.setInt(YarnConfiguration.NM_VCORES, 8);
+    containersMonitor.init(conf);
+    when(mockContainerScheduler.getContainersMonitor())
+        .thenReturn(containersMonitor);
+  }
+
+  /**
+   * Node has capacity for 1024 MB and 8 cores. Saturate the node. Once the
+   * node is full, hasResourcesAvailable should return false.
+   */
+  @Test
+  public void testHasResourcesAvailable() {
+    AllocationBasedResourceUtilizationTracker tracker =
+        new AllocationBasedResourceUtilizationTracker(mockContainerScheduler);
+    Container testContainer = mock(Container.class);
+    when(testContainer.getResource()).thenReturn(Resource.newInstance(512, 4));
+    for (int i = 0; i < 2; i++) {
+      Assert.assertTrue(tracker.hasResourcesAvailable(testContainer));
+      tracker.addContainerResources(testContainer);
+    }
+    Assert.assertFalse(tracker.hasResourcesAvailable(testContainer));
+  }
+
+  /**
+   * Test the case where the current allocation has been truncated to 0.8888891
+   * (8/9 cores used). Request 1 additional core - hasEnoughCpu should return
+   * true.
+   */
+  @Test
+  public void testHasEnoughCpu() {
+    AllocationBasedResourceUtilizationTracker tracker =
+        new AllocationBasedResourceUtilizationTracker(mockContainerScheduler);
+    float currentAllocation = 0.8888891f;
+    long totalCores = 9;
+    int alreadyUsedCores = 8;
+    Assert.assertTrue(tracker.hasEnoughCpu(currentAllocation, totalCores,
+        (int) totalCores - alreadyUsedCores));
+    Assert.assertFalse(tracker.hasEnoughCpu(currentAllocation, totalCores,
+        (int) totalCores - alreadyUsedCores + 1));
+  }
+}
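
A standalone arithmetic check of the behaviour the test pins down (values
taken from testHasEnoughCpu above; the helper mirrors the patched
hasEnoughCpu):

public class CpuRoundingDemo {

  // Same comparison as the patched hasEnoughCpu: convert the fractional
  // allocation back to a whole-core count before adding the request.
  static boolean hasEnoughCpu(float currentAllocation, long totalCores,
      int coresRequested) {
    return Math.round(currentAllocation * totalCores) + coresRequested
        <= totalCores;
  }

  public static void main(String[] args) {
    float current = 0.8888891f; // 8 of 9 cores, accumulated in float
    long total = 9;
    // Naive float comparison: 0.8888891f + 1/9f exceeds 1.0f, so the old
    // "allocation + share > 1.0f" check would wrongly reject the ninth core.
    System.out.println(current + 1f / total > 1.0f);      // true
    // Integral comparison: round(0.8888891f * 9) = 8, and 8 + 1 <= 9.
    System.out.println(hasEnoughCpu(current, total, 1));  // true
    System.out.println(hasEnoughCpu(current, total, 2));  // false
  }
}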




[06/50] [abbrv] hadoop git commit: YARN-6150. TestContainerManagerSecurity tests for Yarn Server are flakey. Contributed by Daniel Sturman and Ray Chiang.

Posted by xg...@apache.org.
YARN-6150. TestContainerManagerSecurity tests for Yarn Server are flakey. Contributed by Daniel Sturman and Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/218b1b33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/218b1b33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/218b1b33

Branch: refs/heads/YARN-5734
Commit: 218b1b33ffe83cf2e330a2aa90685d0c14547a3d
Parents: f2921e5
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 25 15:11:21 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 25 15:12:08 2017 +0900

----------------------------------------------------------------------
 .../server/TestContainerManagerSecurity.java    | 74 +++++++++++++-------
 1 file changed, 50 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/218b1b33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 9626b35..3ba4beb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
@@ -122,7 +123,13 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     testRootDir.delete();
   }
 
-  @Parameters
+  /*
+   * Run two tests: one with no security ("Simple") and one with security
+   * ("Secure"). The first parameter is just the test name, to make it easier
+   * to debug and to give details in, say, an IDE. The second is the
+   * configuration object to use.
+   */
+  @Parameters(name = "{0}")
   public static Collection<Object[]> configs() {
     Configuration configurationWithoutSecurity = new Configuration();
     configurationWithoutSecurity.set(
@@ -142,16 +149,18 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
       YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
       httpSpnegoKeytabFile.getAbsolutePath());
 
-    return Arrays.asList(new Object[][] { { configurationWithoutSecurity },
-        { configurationWithSecurity } });
+    return Arrays.asList(new Object[][] {
+        {"Simple", configurationWithoutSecurity},
+        {"Secure", configurationWithSecurity}});
   }
   
-  public TestContainerManagerSecurity(Configuration conf) {
+  public TestContainerManagerSecurity(String name, Configuration conf) {
+    LOG.info("RUNNING TEST " + name);
     conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 100000L);
     this.conf = conf;
   }
   
-  @Test (timeout = 120000)
+  @Test
   public void testContainerManager() throws Exception {
       
       // TestNMTokens.
@@ -165,7 +174,11 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
 
   }
 
-  private void testNMTokens(Configuration conf) throws Exception {
+  /**
+   * Run a series of tests using different NMTokens. A configuration is
+   * provided for managing creation of the tokens and RPC.
+   */
+  private void testNMTokens(Configuration testConf) throws Exception {
     NMTokenSecretManagerInRM nmTokenSecretManagerRM =
         yarnCluster.getResourceManager().getRMContext()
           .getNMTokenSecretManager();
@@ -201,7 +214,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
      * nmToken error. (This can be best tested if we roll over NMToken
      * master key twice).
      */
-    YarnRPC rpc = YarnRPC.create(conf);
+    YarnRPC rpc = YarnRPC.create(testConf);
     String user = "test";
     Resource r = Resource.newInstance(1024, 1);
 
@@ -233,7 +246,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     StringBuilder sb;
     // testInvalidNMToken ... creating NMToken using different secret manager.
     
-    NMTokenSecretManagerInRM tempManager = new NMTokenSecretManagerInRM(conf);
+    NMTokenSecretManagerInRM tempManager = new NMTokenSecretManagerInRM(testConf);
     tempManager.rollMasterKey();
     do {
       tempManager.rollMasterKey();
@@ -252,7 +265,9 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     }
     String errorMsg = testStartContainer(rpc, validAppAttemptId, validNode,
         validContainerToken, null, true);
-    Assert.assertTrue(errorMsg.contains(sb.toString()));
+    Assert.assertTrue("In calling " + validNode + " exception was '"
+        + errorMsg + "' but doesn't contain '"
+        + sb.toString() + "'", errorMsg.contains(sb.toString()));
     
     org.apache.hadoop.yarn.api.records.Token invalidNMToken =
         tempManager.createNMToken(validAppAttemptId, validNode, user);
@@ -277,7 +292,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
             validContainerToken, invalidNMToken, true)));
     
     // using correct tokens. nmtoken for app attempt should get saved.
-    conf.setInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
+    testConf.setInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
         4 * 60 * 1000);
     validContainerToken =
         containerTokenSecretManager.createContainerToken(validContainerId,
@@ -375,8 +390,8 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode,
         validContainerId, validNMToken, false).contains(sb.toString()));
 
-    // using appAttempt-1 NMtoken for launching container for appAttempt-2 should
-    // succeed.
+    // using appAttempt-1 NMtoken for launching container for appAttempt-2
+    // should succeed.
     ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);
     Token attempt1NMToken =
         nmTokenSecretManagerRM
@@ -390,13 +405,20 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
   }
 
   private void waitForContainerToFinishOnNM(ContainerId containerId) {
-    Context nmContet = yarnCluster.getNodeManager(0).getNMContext();
+    Context nmContext = yarnCluster.getNodeManager(0).getNMContext();
     int interval = 4 * 60; // Max time for container token to expire.
-    Assert.assertNotNull(nmContet.getContainers().containsKey(containerId));
+
+    Assert.assertNotNull(nmContext.getContainers().containsKey(containerId));
+
+    // Get the container first, as it may be removed from the Context
+    // by asynchronous calls.
+    // Otherwise the container could be removed mid-wait and the lookup
+    // come back null, which made this test flaky.
+    Container waitContainer = nmContext.getContainers().get(containerId);
+
     while ((interval-- > 0)
-        && !nmContet.getContainers().get(containerId)
-          .cloneAndGetContainerStatus().getState()
-          .equals(ContainerState.COMPLETE)) {
+        && !waitContainer.cloneAndGetContainerStatus()
+        .getState().equals(ContainerState.COMPLETE)) {
       try {
         LOG.info("Waiting for " + containerId + " to complete.");
         Thread.sleep(1000);
@@ -407,7 +429,8 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     // explicitly acked by RM. Now, manually remove it for testing.
     yarnCluster.getNodeManager(0).getNodeStatusUpdater()
       .addCompletedContainer(containerId);
-    nmContet.getContainers().remove(containerId);
+    LOG.info("Removing container from NMContext, containerID = " + containerId);
+    nmContext.getContainers().remove(containerId);
   }
 
   protected void waitForNMToReceiveNMTokenKey(
@@ -439,7 +462,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
       ContainerId containerId, Token nmToken, boolean isExceptionExpected) {
     try {
       stopContainer(rpc, nmToken,
-          Arrays.asList(new ContainerId[] { containerId }), appAttemptId,
+          Arrays.asList(new ContainerId[] {containerId}), appAttemptId,
           nodeId);
       if (isExceptionExpected) {
         fail("Exception was expected!!");
@@ -525,7 +548,8 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
       proxy =
           getContainerManagementProtocolProxy(rpc, nmToken, nodeId,
               appAttemptId.toString());
-      GetContainerStatusesResponse statuses = proxy.getContainerStatuses(request);
+      GetContainerStatusesResponse statuses
+          = proxy.getContainerStatuses(request);
       if (statuses.getFailedRequests() != null
           && statuses.getFailedRequests().containsKey(containerId)) {
         parseAndThrowException(statuses.getFailedRequests().get(containerId)
@@ -546,7 +570,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     ContainerLaunchContext context =
         Records.newRecord(ContainerLaunchContext.class);
     StartContainerRequest scRequest =
-        StartContainerRequest.newInstance(context,containerToken);
+        StartContainerRequest.newInstance(context, containerToken);
     List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
     list.add(scRequest);
     StartContainersRequest allRequests =
@@ -582,7 +606,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     ContainerManagementProtocol proxy;
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
     final InetSocketAddress addr =
-        NetUtils.createSocketAddr(nodeId.getHost(), nodeId.getPort());
+        new InetSocketAddress(nodeId.getHost(), nodeId.getPort());
     if (nmToken != null) {
       ugi.addToken(ConverterUtils.convertFromYarn(nmToken, addr));      
     }
@@ -645,9 +669,11 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     ContainerTokenIdentifier containerTokenIdentifier = 
         getContainerTokenIdentifierFromToken(containerToken);
     
-    // Verify new compatible version ContainerTokenIdentifier can work successfully.
+    // Verify new compatible version ContainerTokenIdentifier
+    // can work successfully.
     ContainerTokenIdentifierForTest newVersionTokenIdentifier = 
-        new ContainerTokenIdentifierForTest(containerTokenIdentifier, "message");
+        new ContainerTokenIdentifierForTest(containerTokenIdentifier,
+            "message");
     byte[] password = 
         containerTokenSecretManager.createPassword(newVersionTokenIdentifier);
     
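
Two of the changes above generalize beyond this test. First,
@Parameters(name = "{0}") labels each parameterized run with its first
argument, so results read as testContainerManager[Simple] and
testContainerManager[Secure]; a minimal JUnit 4 sketch (invented class name):

import java.util.Arrays;
import java.util.Collection;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class NamedParameterDemo {
  private final String name;

  public NamedParameterDemo(String name) {
    this.name = name;
  }

  // "{0}" substitutes the first parameter into each test's label.
  @Parameters(name = "{0}")
  public static Collection<Object[]> configs() {
    return Arrays.asList(new Object[][] {{"Simple"}, {"Secure"}});
  }

  @Test
  public void testLabelled() {
    // Runs as testLabelled[Simple] and testLabelled[Secure].
    Assert.assertNotNull(name);
  }
}

Second, waitForContainerToFinishOnNM now fetches the Container from the NM
context once and polls the cached reference, because an asynchronous
completion can remove the map entry mid-loop. The same look-up-once pattern,
reduced to a runnable sketch with invented names:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class LookupOnceDemo {
  public static void main(String[] args) throws InterruptedException {
    ConcurrentMap<String, String> containers = new ConcurrentHashMap<>();
    containers.put("container_1", "RUNNING");

    // Cache the reference before anything can remove it, as the test now
    // does with waitContainer.
    String cached = containers.get("container_1");

    // An async handler removes the entry while we would still be polling.
    Thread remover = new Thread(() -> containers.remove("container_1"));
    remover.start();
    remover.join();

    System.out.println(containers.get("container_1")); // null: re-read would NPE on use
    System.out.println(cached);                        // RUNNING: cached copy stays valid
  }
}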




[13/50] [abbrv] hadoop git commit: HADOOP-14680. Azure: IndexOutOfBoundsException in BlockBlobInputStream. Contributed by Thomas Marquardt.

Posted by xg...@apache.org.
HADOOP-14680. Azure: IndexOutOfBoundsException in BlockBlobInputStream. Contributed by Thomas Marquardt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a92bf39e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a92bf39e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a92bf39e

Branch: refs/heads/YARN-5734
Commit: a92bf39e2313d4bfccd641ce0ccefe26f4903a69
Parents: f81a4ef
Author: Jitendra Pandey <ji...@apache.org>
Authored: Tue Jul 25 16:26:48 2017 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Tue Jul 25 16:26:48 2017 -0700

----------------------------------------------------------------------
 .../hadoop/fs/azure/BlockBlobInputStream.java   |  2 +-
 .../fs/azure/TestBlockBlobInputStream.java      | 50 +++++++++++++++++++-
 2 files changed, 49 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92bf39e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
index 2ed0686..5542415 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java
@@ -358,7 +358,7 @@ final class BlockBlobInputStream extends InputStream implements Seekable {
      * Gets the current capacity of the stream.
      */
     public synchronized int capacity() {
-      return length - offset;
+      return length;
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92bf39e/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
index 2db063b..2453584 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java
@@ -43,8 +43,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
 
-import static org.junit.Assert.*;
-import static org.junit.Assume.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeNotNull;
 
 import static org.apache.hadoop.test.LambdaTestUtils.*;
 
@@ -194,6 +197,49 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase {
     createTestFileAndSetLength();
   }
 
+  @Test
+  public void test_0200_BasicReadTestV2() throws Exception {
+    assumeHugeFileExists();
+
+    try (
+        FSDataInputStream inputStreamV1
+            = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
+
+        FSDataInputStream inputStreamV2
+            = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
+    ) {
+      byte[] bufferV1 = new byte[3 * MEGABYTE];
+      byte[] bufferV2 = new byte[bufferV1.length];
+
+      // v1 forward seek and read a kilobyte into first kilobyte of bufferV1
+      inputStreamV1.seek(5 * MEGABYTE);
+      int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
+      assertEquals(numBytesReadV1, KILOBYTE);
+
+      // v2 forward seek and read a kilobyte into first kilobyte of bufferV2
+      inputStreamV2.seek(5 * MEGABYTE);
+      int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
+      assertEquals(numBytesReadV2, KILOBYTE);
+
+      assertArrayEquals(bufferV1, bufferV2);
+
+      int len = MEGABYTE;
+      int offset = bufferV1.length - len;
+
+      // v1 reverse seek and read a megabyte into last megabyte of bufferV1
+      inputStreamV1.seek(3 * MEGABYTE);
+      numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
+      assertEquals(numBytesReadV1, len);
+
+      // v2 reverse seek and read a megabyte into last megabyte of bufferV2
+      inputStreamV2.seek(3 * MEGABYTE);
+      numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
+      assertEquals(numBytesReadV2, len);
+
+      assertArrayEquals(bufferV1, bufferV2);
+    }
+  }
+
   /**
    * Validates the implementation of InputStream.markSupported.
    * @throws IOException
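
The one-character fix above changes capacity() from the unread remainder
(length - offset) to the buffer's total valid length. A toy sketch (invented
names, not the Azure internals) of why a "capacity" that shrinks as the
stream is consumed breaks callers that cache it:

public class CapacityVsRemainingDemo {
  private final byte[] buffer;
  private int offset;        // next read position
  private final int length;  // count of valid bytes

  CapacityVsRemainingDemo(byte[] data) {
    this.buffer = data;
    this.length = data.length;
  }

  int capacity() { return length; }            // fixed: stable total size
  int remaining() { return length - offset; }  // what the old capacity() returned

  int read(byte[] dest, int destOff, int len) {
    int n = Math.min(len, remaining());
    System.arraycopy(buffer, offset, dest, destOff, n);
    offset += n;
    return n;
  }

  public static void main(String[] args) {
    CapacityVsRemainingDemo s =
        new CapacityVsRemainingDemo(new byte[] {1, 2, 3, 4});
    s.read(new byte[4], 0, 3);          // consume 3 of 4 bytes
    System.out.println(s.capacity());   // 4: stable under the fix
    System.out.println(s.remaining());  // 1: the shrinking value the old code
                                        // reported, which invalidates size
                                        // arithmetic done against a cached
                                        // earlier reading
  }
}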




[26/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
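
Background for the rename throughout this file: JDK 9 reserves a bare
underscore as a keyword, so Hamlet's one-character _() terminator no longer
compiles, and the regenerated hamlet2 package below doubles it to __(). A
tiny illustration of the language change (not Hadoop code):

public class UnderscoreKeywordDemo {
  // int _ = 1;  // javac 9+ rejects this: '_' is a keyword as of release 9
  static String __(String s) { return s; } // a doubled underscore stays legal

  public static void main(String[] args) {
    System.out.println(__("hamlet2 renames _ to __"));
  }
}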
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java
new file mode 100644
index 0000000..05e1b79
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java
@@ -0,0 +1,30557 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+// Generated by HamletGen. Do NOT edit!
+package org.apache.hadoop.yarn.webapp.hamlet2;
+import static java.util.EnumSet.of;
+import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.ENDTAG;
+import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.INLINE;
+import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.PRE;
+
+import java.io.PrintWriter;
+import java.util.EnumSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.yarn.webapp.SubView;
+
+@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
+public class Hamlet extends HamletImpl implements HamletSpec._Html {
+  public Hamlet(PrintWriter out, int nestLevel, boolean wasInline) {
+    super(out, nestLevel, wasInline);
+  }
+
+  static EnumSet<EOpt> opt(boolean endTag, boolean inline, boolean pre) {
+    EnumSet<EOpt> opts = of(ENDTAG);
+    if (!endTag) opts.remove(ENDTAG);
+    if (inline) opts.add(INLINE);
+    if (pre) opts.add(PRE);
+    return opts;
+  }
+
+  public class HTML<T extends __> extends EImp<T> implements HamletSpec.HTML {
+    public HTML(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public HTML<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public HTML<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public HEAD<HTML<T>> head() {
+      closeAttrs();
+      return head_(this, false);
+    }
+
+    @Override
+    public BODY<HTML<T>> body() {
+      closeAttrs();
+      return body_(this, false);
+    }
+
+    @Override
+    public BODY<HTML<T>> body(String selector) {
+      return setSelector(body(), selector);
+    }
+
+    @Override
+    public BASE<HTML<T>> base() {
+      closeAttrs();
+      return base_(this, false);
+    }
+
+    @Override
+    public HTML<T> base(String href) {
+      return base().$href(href).__();
+    }
+
+    @Override
+    public TITLE<HTML<T>> title() {
+      closeAttrs();
+      return title_(this, false);
+    }
+
+    @Override
+    public HTML<T> title(String cdata) {
+      return title().__(cdata).__();
+    }
+
+    @Override
+    public STYLE<HTML<T>> style() {
+      closeAttrs();
+      return style_(this, false);
+    }
+
+    @Override
+    public HTML<T> style(Object... lines) {
+      return style().$type("text/css").__(lines).__();
+    }
+
+    @Override
+    public LINK<HTML<T>> link() {
+      closeAttrs();
+      return link_(this, false);
+    }
+
+    @Override
+    public HTML<T> link(String href) {
+      return setLinkHref(link(), href).__();
+    }
+
+    @Override
+    public META<HTML<T>> meta() {
+      closeAttrs();
+      return meta_(this, false);
+    }
+
+    @Override
+    public HTML<T> meta(String name, String content) {
+      return meta().$name(name).$content(content).__();
+    }
+
+    @Override
+    public HTML<T> meta_http(String header, String content) {
+      return meta().$http_equiv(header).$content(content).__();
+    }
+
+    @Override
+    public SCRIPT<HTML<T>> script() {
+      closeAttrs();
+      return script_(this, false);
+    }
+
+    @Override
+    public HTML<T> script(String src) {
+      return setScriptSrc(script(), src).__();
+    }
+
+    @Override
+    public OBJECT<HTML<T>> object() {
+      closeAttrs();
+      return object_(this, true);
+    }
+
+    @Override
+    public OBJECT<HTML<T>> object(String selector) {
+      return setSelector(object(), selector);
+    }
+
+    @Override
+    public TABLE<HTML<T>> table() {
+      closeAttrs();
+      return table_(this, false);
+    }
+
+    @Override
+    public TABLE<HTML<T>> table(String selector) {
+      return setSelector(table(), selector);
+    }
+
+    @Override
+    public HTML<T> address(String cdata) {
+      return address().__(cdata).__();
+    }
+
+    @Override
+    public ADDRESS<HTML<T>> address() {
+      closeAttrs();
+      return address_(this, false);
+    }
+
+    @Override
+    public P<HTML<T>> p(String selector) {
+      return setSelector(p(), selector);
+    }
+
+    @Override
+    public P<HTML<T>> p() {
+      closeAttrs();
+      return p_(this, false);
+    }
+
+    @Override
+    public HTML<T> __(Class<? extends SubView> cls) {
+      _v(cls);
+      return this;
+    }
+
+    @Override
+    public HR<HTML<T>> hr() {
+      closeAttrs();
+      return hr_(this, false);
+    }
+
+    @Override
+    public HTML<T> hr(String selector) {
+      return setSelector(hr(), selector).__();
+    }
+
+    @Override
+    public DL<HTML<T>> dl(String selector) {
+      return setSelector(dl(), selector);
+    }
+
+    @Override
+    public DL<HTML<T>> dl() {
+      closeAttrs();
+      return dl_(this, false);
+    }
+
+    @Override
+    public DIV<HTML<T>> div(String selector) {
+      return setSelector(div(), selector);
+    }
+
+    @Override
+    public DIV<HTML<T>> div() {
+      closeAttrs();
+      return div_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<HTML<T>> blockquote() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<HTML<T>> bq() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public HTML<T> h1(String cdata) {
+      return h1().__(cdata).__();
+    }
+
+    @Override
+    public H1<HTML<T>> h1() {
+      closeAttrs();
+      return h1_(this, false);
+    }
+
+    @Override
+    public HTML<T> h1(String selector, String cdata) {
+      return setSelector(h1(), selector).__(cdata).__();
+    }
+
+    @Override
+    public HTML<T> h2(String cdata) {
+      return h2().__(cdata).__();
+    }
+
+    @Override
+    public H2<HTML<T>> h2() {
+      closeAttrs();
+      return h2_(this, false);
+    }
+
+    @Override
+    public HTML<T> h2(String selector, String cdata) {
+      return setSelector(h2(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H3<HTML<T>> h3() {
+      closeAttrs();
+      return h3_(this, false);
+    }
+
+    @Override
+    public HTML<T> h3(String cdata) {
+      return h3().__(cdata).__();
+    }
+
+    @Override
+    public HTML<T> h3(String selector, String cdata) {
+      return setSelector(h3(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H4<HTML<T>> h4() {
+      closeAttrs();
+      return h4_(this, false);
+    }
+
+    @Override
+    public HTML<T> h4(String cdata) {
+      return h4().__(cdata).__();
+    }
+
+    @Override
+    public HTML<T> h4(String selector, String cdata) {
+      return setSelector(h4(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H5<HTML<T>> h5() {
+      closeAttrs();
+      return h5_(this, false);
+    }
+
+    @Override
+    public HTML<T> h5(String cdata) {
+      return h5().__(cdata).__();
+    }
+
+    @Override
+    public HTML<T> h5(String selector, String cdata) {
+      return setSelector(h5(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H6<HTML<T>> h6() {
+      closeAttrs();
+      return h6_(this, false);
+    }
+
+    @Override
+    public HTML<T> h6(String cdata) {
+      return h6().__(cdata).__();
+    }
+
+    @Override
+    public HTML<T> h6(String selector, String cdata) {
+      return setSelector(h6(), selector).__(cdata).__();
+    }
+
+    @Override
+    public UL<HTML<T>> ul() {
+      closeAttrs();
+      return ul_(this, false);
+    }
+
+    @Override
+    public UL<HTML<T>> ul(String selector) {
+      return setSelector(ul(), selector);
+    }
+
+    @Override
+    public OL<HTML<T>> ol() {
+      closeAttrs();
+      return ol_(this, false);
+    }
+
+    @Override
+    public OL<HTML<T>> ol(String selector) {
+      return setSelector(ol(), selector);
+    }
+
+    @Override
+    public PRE<HTML<T>> pre() {
+      closeAttrs();
+      return pre_(this, false);
+    }
+
+    @Override
+    public PRE<HTML<T>> pre(String selector) {
+      return setSelector(pre(), selector);
+    }
+
+    @Override
+    public FORM<HTML<T>> form() {
+      closeAttrs();
+      return form_(this, false);
+    }
+
+    @Override
+    public FORM<HTML<T>> form(String selector) {
+      return setSelector(form(), selector);
+    }
+
+    @Override
+    public FIELDSET<HTML<T>> fieldset() {
+      closeAttrs();
+      return fieldset_(this, false);
+    }
+
+    @Override
+    public FIELDSET<HTML<T>> fieldset(String selector) {
+      return setSelector(fieldset(), selector);
+    }
+
+    @Override
+    public INS<HTML<T>> ins() {
+      closeAttrs();
+      return ins_(this, true);
+    }
+
+    @Override
+    public HTML<T> ins(String cdata) {
+      return ins().__(cdata).__();
+    }
+
+    @Override
+    public DEL<HTML<T>> del() {
+      closeAttrs();
+      return del_(this, true);
+    }
+
+    @Override
+    public HTML<T> del(String cdata) {
+      return del().__(cdata).__();
+    }
+  }
+
+  private <T extends __> HEAD<T> head_(T e, boolean inline) {
+    return new HEAD<T>("head", e, opt(true, inline, false)); }
+
+  private <T extends __> BODY<T> body_(T e, boolean inline) {
+    return new BODY<T>("body", e, opt(true, inline, false)); }
+
+  public class SCRIPT<T extends __> extends EImp<T> implements HamletSpec.SCRIPT {
+    public SCRIPT(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public SCRIPT<T> $type(String value) {
+      addAttr("type", value);
+      return this;
+    }
+
+    @Override
+    public SCRIPT<T> $src(String value) {
+      addAttr("src", value);
+      return this;
+    }
+
+    @Override
+    public SCRIPT<T> $charset(String value) {
+      addAttr("charset", value);
+      return this;
+    }
+
+    @Override
+    public SCRIPT<T> $defer(String value) {
+      addAttr("defer", value);
+      return this;
+    }
+
+    @Override
+    public SCRIPT<T> __(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+  }
+
+  public class STYLE<T extends __> extends EImp<T> implements HamletSpec.STYLE {
+    public STYLE(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public STYLE<T> $type(String value) {
+      addAttr("type", value);
+      return this;
+    }
+
+    @Override
+    public STYLE<T> $media(EnumSet<Media> value) {
+      addMediaAttr("media", value);
+      return this;
+    }
+
+    @Override
+    public STYLE<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public STYLE<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public STYLE<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public STYLE<T> __(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+  }
+
+  public class META<T extends __> extends EImp<T> implements HamletSpec.META {
+    public META(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public META<T> $http_equiv(String value) {
+      addAttr("http-equiv", value);
+      return this;
+    }
+
+    @Override
+    public META<T> $name(String value) {
+      addAttr("name", value);
+      return this;
+    }
+
+    @Override
+    public META<T> $content(String value) {
+      addAttr("content", value);
+      return this;
+    }
+
+    @Override
+    public META<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public META<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+  }
+
+  public class BASE<T extends __> extends EImp<T> implements HamletSpec.BASE {
+    public BASE(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public BASE<T> $href(String value) {
+      addAttr("href", value);
+      return this;
+    }
+  }
+
+  public class TITLE<T extends __> extends EImp<T> implements HamletSpec.TITLE {
+    public TITLE(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TITLE<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TITLE<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TITLE<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public TITLE<T> _r(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+  }
+
+  public class HEAD<T extends __> extends EImp<T> implements HamletSpec.HEAD {
+    public HEAD(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public HEAD<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public HEAD<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public BASE<HEAD<T>> base() {
+      closeAttrs();
+      return base_(this, false);
+    }
+
+    @Override
+    public HEAD<T> base(String href) {
+      return base().$href(href).__();
+    }
+
+    @Override
+    public TITLE<HEAD<T>> title() {
+      closeAttrs();
+      return title_(this, false);
+    }
+
+    @Override
+    public HEAD<T> title(String cdata) {
+      return title().__(cdata).__();
+    }
+
+    @Override
+    public STYLE<HEAD<T>> style() {
+      closeAttrs();
+      return style_(this, false);
+    }
+
+    @Override
+    public HEAD<T> style(Object... lines) {
+      return style().$type("text/css").__(lines).__();
+    }
+
+    @Override
+    public LINK<HEAD<T>> link() {
+      closeAttrs();
+      return link_(this, false);
+    }
+
+    @Override
+    public HEAD<T> link(String href) {
+      return setLinkHref(link(), href).__();
+    }
+
+    @Override
+    public META<HEAD<T>> meta() {
+      closeAttrs();
+      return meta_(this, false);
+    }
+
+    @Override
+    public HEAD<T> meta(String name, String content) {
+      return meta().$name(name).$content(content).__();
+    }
+
+    @Override
+    public HEAD<T> meta_http(String header, String content) {
+      return meta().$http_equiv(header).$content(content).__();
+    }
+
+    @Override
+    public SCRIPT<HEAD<T>> script() {
+      closeAttrs();
+      return script_(this, false);
+    }
+
+    @Override
+    public HEAD<T> script(String src) {
+      return setScriptSrc(script(), src).__();
+    }
+
+    @Override
+    public OBJECT<HEAD<T>> object() {
+      closeAttrs();
+      return object_(this, true);
+    }
+
+    @Override
+    public OBJECT<HEAD<T>> object(String selector) {
+      return setSelector(object(), selector);
+    }
+  }
+
+  private <T extends __> BASE<T> base_(T e, boolean inline) {
+    return new BASE<T>("base", e, opt(false, inline, false)); }
+
+  private <T extends __> TITLE<T> title_(T e, boolean inline) {
+    return new TITLE<T>("title", e, opt(true, inline, false)); }
+
+  public class TD<T extends __> extends EImp<T> implements HamletSpec.TD {
+    public TD(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TD<T> $headers(String value) {
+      addAttr("headers", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $scope(Scope value) {
+      addAttr("scope", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $rowspan(int value) {
+      addAttr("rowspan", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $colspan(int value) {
+      addAttr("colspan", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public TD<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<TD<T>> table() {
+      closeAttrs();
+      return table_(this, false);
+    }
+
+    @Override
+    public TABLE<TD<T>> table(String selector) {
+      return setSelector(table(), selector);
+    }
+
+    @Override
+    public TD<T> address(String cdata) {
+      return address().__(cdata).__();
+    }
+
+    @Override
+    public ADDRESS<TD<T>> address() {
+      closeAttrs();
+      return address_(this, false);
+    }
+
+    @Override
+    public P<TD<T>> p(String selector) {
+      return setSelector(p(), selector);
+    }
+
+    @Override
+    public P<TD<T>> p() {
+      closeAttrs();
+      return p_(this, false);
+    }
+
+    @Override
+    public TD<T> __(Class<? extends SubView> cls) {
+      _v(cls);
+      return this;
+    }
+
+    @Override
+    public HR<TD<T>> hr() {
+      closeAttrs();
+      return hr_(this, false);
+    }
+
+    @Override
+    public TD<T> hr(String selector) {
+      return setSelector(hr(), selector).__();
+    }
+
+    @Override
+    public DL<TD<T>> dl(String selector) {
+      return setSelector(dl(), selector);
+    }
+
+    @Override
+    public DL<TD<T>> dl() {
+      closeAttrs();
+      return dl_(this, false);
+    }
+
+    @Override
+    public DIV<TD<T>> div(String selector) {
+      return setSelector(div(), selector);
+    }
+
+    @Override
+    public DIV<TD<T>> div() {
+      closeAttrs();
+      return div_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<TD<T>> blockquote() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<TD<T>> bq() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public TD<T> h1(String cdata) {
+      return h1().__(cdata).__();
+    }
+
+    @Override
+    public H1<TD<T>> h1() {
+      closeAttrs();
+      return h1_(this, false);
+    }
+
+    @Override
+    public TD<T> h1(String selector, String cdata) {
+      return setSelector(h1(), selector).__(cdata).__();
+    }
+
+    @Override
+    public TD<T> h2(String cdata) {
+      return h2().__(cdata).__();
+    }
+
+    @Override
+    public H2<TD<T>> h2() {
+      closeAttrs();
+      return h2_(this, false);
+    }
+
+    @Override
+    public TD<T> h2(String selector, String cdata) {
+      return setSelector(h2(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H3<TD<T>> h3() {
+      closeAttrs();
+      return h3_(this, false);
+    }
+
+    @Override
+    public TD<T> h3(String cdata) {
+      return h3().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> h3(String selector, String cdata) {
+      return setSelector(h3(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H4<TD<T>> h4() {
+      closeAttrs();
+      return h4_(this, false);
+    }
+
+    @Override
+    public TD<T> h4(String cdata) {
+      return h4().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> h4(String selector, String cdata) {
+      return setSelector(h4(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H5<TD<T>> h5() {
+      closeAttrs();
+      return h5_(this, false);
+    }
+
+    @Override
+    public TD<T> h5(String cdata) {
+      return h5().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> h5(String selector, String cdata) {
+      return setSelector(h5(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H6<TD<T>> h6() {
+      closeAttrs();
+      return h6_(this, false);
+    }
+
+    @Override
+    public TD<T> h6(String cdata) {
+      return h6().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> h6(String selector, String cdata) {
+      return setSelector(h6(), selector).__(cdata).__();
+    }
+
+    @Override
+    public UL<TD<T>> ul() {
+      closeAttrs();
+      return ul_(this, false);
+    }
+
+    @Override
+    public UL<TD<T>> ul(String selector) {
+      return setSelector(ul(), selector);
+    }
+
+    @Override
+    public OL<TD<T>> ol() {
+      closeAttrs();
+      return ol_(this, false);
+    }
+
+    @Override
+    public OL<TD<T>> ol(String selector) {
+      return setSelector(ol(), selector);
+    }
+
+    @Override
+    public PRE<TD<T>> pre() {
+      closeAttrs();
+      return pre_(this, false);
+    }
+
+    @Override
+    public PRE<TD<T>> pre(String selector) {
+      return setSelector(pre(), selector);
+    }
+
+    @Override
+    public FORM<TD<T>> form() {
+      closeAttrs();
+      return form_(this, false);
+    }
+
+    @Override
+    public FORM<TD<T>> form(String selector) {
+      return setSelector(form(), selector);
+    }
+
+    @Override
+    public FIELDSET<TD<T>> fieldset() {
+      closeAttrs();
+      return fieldset_(this, false);
+    }
+
+    @Override
+    public FIELDSET<TD<T>> fieldset(String selector) {
+      return setSelector(fieldset(), selector);
+    }
+
+    @Override
+    public TD<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public TD<T> _r(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+
+    @Override
+    public B<TD<T>> b() {
+      closeAttrs();
+      return b_(this, true);
+    }
+
+    @Override
+    public TD<T> b(String cdata) {
+      return b().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> b(String selector, String cdata) {
+      return setSelector(b(), selector).__(cdata).__();
+    }
+
+    @Override
+    public I<TD<T>> i() {
+      closeAttrs();
+      return i_(this, true);
+    }
+
+    @Override
+    public TD<T> i(String cdata) {
+      return i().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> i(String selector, String cdata) {
+      return setSelector(i(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SMALL<TD<T>> small() {
+      closeAttrs();
+      return small_(this, true);
+    }
+
+    @Override
+    public TD<T> small(String cdata) {
+      return small().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> small(String selector, String cdata) {
+      return setSelector(small(), selector).__(cdata).__();
+    }
+
+    @Override
+    public TD<T> em(String cdata) {
+      return em().__(cdata).__();
+    }
+
+    @Override
+    public EM<TD<T>> em() {
+      closeAttrs();
+      return em_(this, true);
+    }
+
+    @Override
+    public TD<T> em(String selector, String cdata) {
+      return setSelector(em(), selector).__(cdata).__();
+    }
+
+    @Override
+    public STRONG<TD<T>> strong() {
+      closeAttrs();
+      return strong_(this, true);
+    }
+
+    @Override
+    public TD<T> strong(String cdata) {
+      return strong().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> strong(String selector, String cdata) {
+      return setSelector(strong(), selector).__(cdata).__();
+    }
+
+    @Override
+    public DFN<TD<T>> dfn() {
+      closeAttrs();
+      return dfn_(this, true);
+    }
+
+    @Override
+    public TD<T> dfn(String cdata) {
+      return dfn().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> dfn(String selector, String cdata) {
+      return setSelector(dfn(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CODE<TD<T>> code() {
+      closeAttrs();
+      return code_(this, true);
+    }
+
+    @Override
+    public TD<T> code(String cdata) {
+      return code().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> code(String selector, String cdata) {
+      return setSelector(code(), selector).__(cdata).__();
+    }
+
+    @Override
+    public TD<T> samp(String cdata) {
+      return samp().__(cdata).__();
+    }
+
+    @Override
+    public SAMP<TD<T>> samp() {
+      closeAttrs();
+      return samp_(this, true);
+    }
+
+    @Override
+    public TD<T> samp(String selector, String cdata) {
+      return setSelector(samp(), selector).__(cdata).__();
+    }
+
+    @Override
+    public KBD<TD<T>> kbd() {
+      closeAttrs();
+      return kbd_(this, true);
+    }
+
+    @Override
+    public TD<T> kbd(String cdata) {
+      return kbd().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> kbd(String selector, String cdata) {
+      return setSelector(kbd(), selector).__(cdata).__();
+    }
+
+    @Override
+    public VAR<TD<T>> var() {
+      closeAttrs();
+      return var_(this, true);
+    }
+
+    @Override
+    public TD<T> var(String cdata) {
+      return var().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> var(String selector, String cdata) {
+      return setSelector(var(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CITE<TD<T>> cite() {
+      closeAttrs();
+      return cite_(this, true);
+    }
+
+    @Override
+    public TD<T> cite(String cdata) {
+      return cite().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> cite(String selector, String cdata) {
+      return setSelector(cite(), selector).__(cdata).__();
+    }
+
+    @Override
+    public ABBR<TD<T>> abbr() {
+      closeAttrs();
+      return abbr_(this, true);
+    }
+
+    @Override
+    public TD<T> abbr(String cdata) {
+      return abbr().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> abbr(String selector, String cdata) {
+      return setSelector(abbr(), selector).__(cdata).__();
+    }
+
+    @Override
+    public A<TD<T>> a() {
+      closeAttrs();
+      return a_(this, true);
+    }
+
+    @Override
+    public A<TD<T>> a(String selector) {
+      return setSelector(a(), selector);
+    }
+
+    @Override
+    public TD<T> a(String href, String anchorText) {
+      return a().$href(href).__(anchorText).__();
+    }
+
+    @Override
+    public TD<T> a(String selector, String href, String anchorText) {
+      return setSelector(a(), selector).$href(href).__(anchorText).__();
+    }
+
+    @Override
+    public IMG<TD<T>> img() {
+      closeAttrs();
+      return img_(this, true);
+    }
+
+    @Override
+    public TD<T> img(String src) {
+      return img().$src(src).__();
+    }
+
+    @Override
+    public OBJECT<TD<T>> object() {
+      closeAttrs();
+      return object_(this, true);
+    }
+
+    @Override
+    public OBJECT<TD<T>> object(String selector) {
+      return setSelector(object(), selector);
+    }
+
+    @Override
+    public SUB<TD<T>> sub() {
+      closeAttrs();
+      return sub_(this, true);
+    }
+
+    @Override
+    public TD<T> sub(String cdata) {
+      return sub().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> sub(String selector, String cdata) {
+      return setSelector(sub(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SUP<TD<T>> sup() {
+      closeAttrs();
+      return sup_(this, true);
+    }
+
+    @Override
+    public TD<T> sup(String cdata) {
+      return sup().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> sup(String selector, String cdata) {
+      return setSelector(sup(), selector).__(cdata).__();
+    }
+
+    @Override
+    public MAP<TD<T>> map() {
+      closeAttrs();
+      return map_(this, true);
+    }
+
+    @Override
+    public MAP<TD<T>> map(String selector) {
+      return setSelector(map(), selector);
+    }
+
+    @Override
+    public TD<T> q(String cdata) {
+      return q().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> q(String selector, String cdata) {
+      return setSelector(q(), selector).__(cdata).__();
+    }
+
+    @Override
+    public Q<TD<T>> q() {
+      closeAttrs();
+      return q_(this, true);
+    }
+
+    @Override
+    public BR<TD<T>> br() {
+      closeAttrs();
+      return br_(this, true);
+    }
+
+    @Override
+    public TD<T> br(String selector) {
+      return setSelector(br(), selector).__();
+    }
+
+    @Override
+    public BDO<TD<T>> bdo() {
+      closeAttrs();
+      return bdo_(this, true);
+    }
+
+    @Override
+    public TD<T> bdo(Dir dir, String cdata) {
+      return bdo().$dir(dir).__(cdata).__();
+    }
+
+    @Override
+    public SPAN<TD<T>> span() {
+      closeAttrs();
+      return span_(this, true);
+    }
+
+    @Override
+    public TD<T> span(String cdata) {
+      return span().__(cdata).__();
+    }
+
+    @Override
+    public TD<T> span(String selector, String cdata) {
+      return setSelector(span(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SCRIPT<TD<T>> script() {
+      closeAttrs();
+      return script_(this, true);
+    }
+
+    @Override
+    public TD<T> script(String src) {
+      return setScriptSrc(script(), src).__();
+    }
+
+    @Override
+    public INS<TD<T>> ins() {
+      closeAttrs();
+      return ins_(this, true);
+    }
+
+    @Override
+    public TD<T> ins(String cdata) {
+      return ins().__(cdata).__();
+    }
+
+    @Override
+    public DEL<TD<T>> del() {
+      closeAttrs();
+      return del_(this, true);
+    }
+
+    @Override
+    public TD<T> del(String cdata) {
+      return del().__(cdata).__();
+    }
+
+    @Override
+    public LABEL<TD<T>> label() {
+      closeAttrs();
+      return label_(this, true);
+    }
+
+    @Override
+    public TD<T> label(String forId, String cdata) {
+      return label().$for(forId).__(cdata).__();
+    }
+
+    @Override
+    public INPUT<TD<T>> input(String selector) {
+      return setSelector(input(), selector);
+    }
+
+    @Override
+    public INPUT<TD<T>> input() {
+      closeAttrs();
+      return input_(this, true);
+    }
+
+    @Override
+    public SELECT<TD<T>> select() {
+      closeAttrs();
+      return select_(this, true);
+    }
+
+    @Override
+    public SELECT<TD<T>> select(String selector) {
+      return setSelector(select(), selector);
+    }
+
+    @Override
+    public TEXTAREA<TD<T>> textarea(String selector) {
+      return setSelector(textarea(), selector);
+    }
+
+    @Override
+    public TEXTAREA<TD<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public TD<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<TD<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<TD<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public TD<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
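+  // Editorial note: TD above and TH below share the same generated shape.
+  // Each $attr(...) setter records one HTML attribute and returns this for
+  // chaining; child-element methods call closeAttrs() to flush the pending
+  // attribute list before descending one level; and the cdata overloads
+  // expand to open/fill/close. A minimal sketch using only methods defined
+  // above (the variable td is hypothetical):
+  //
+  //   td.$colspan(2).b("total").__();  // emits <td colspan="2"><b>total</b></td>
+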
+  public class TH<T extends __> extends EImp<T> implements HamletSpec.TH {
+    public TH(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TH<T> $headers(String value) {
+      addAttr("headers", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $scope(Scope value) {
+      addAttr("scope", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $rowspan(int value) {
+      addAttr("rowspan", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $colspan(int value) {
+      addAttr("colspan", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public TH<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<TH<T>> table() {
+      closeAttrs();
+      return table_(this, false);
+    }
+
+    @Override
+    public TABLE<TH<T>> table(String selector) {
+      return setSelector(table(), selector);
+    }
+
+    @Override
+    public TH<T> address(String cdata) {
+      return address().__(cdata).__();
+    }
+
+    @Override
+    public ADDRESS<TH<T>> address() {
+      closeAttrs();
+      return address_(this, false);
+    }
+
+    @Override
+    public P<TH<T>> p(String selector) {
+      return setSelector(p(), selector);
+    }
+
+    @Override
+    public P<TH<T>> p() {
+      closeAttrs();
+      return p_(this, false);
+    }
+
+    @Override
+    public TH<T> __(Class<? extends SubView> cls) {
+      _v(cls);
+      return this;
+    }
+
+    @Override
+    public HR<TH<T>> hr() {
+      closeAttrs();
+      return hr_(this, false);
+    }
+
+    @Override
+    public TH<T> hr(String selector) {
+      return setSelector(hr(), selector).__();
+    }
+
+    @Override
+    public DL<TH<T>> dl(String selector) {
+      return setSelector(dl(), selector);
+    }
+
+    @Override
+    public DL<TH<T>> dl() {
+      closeAttrs();
+      return dl_(this, false);
+    }
+
+    @Override
+    public DIV<TH<T>> div(String selector) {
+      return setSelector(div(), selector);
+    }
+
+    @Override
+    public DIV<TH<T>> div() {
+      closeAttrs();
+      return div_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<TH<T>> blockquote() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<TH<T>> bq() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public TH<T> h1(String cdata) {
+      return h1().__(cdata).__();
+    }
+
+    @Override
+    public H1<TH<T>> h1() {
+      closeAttrs();
+      return h1_(this, false);
+    }
+
+    @Override
+    public TH<T> h1(String selector, String cdata) {
+      return setSelector(h1(), selector).__(cdata).__();
+    }
+
+    @Override
+    public TH<T> h2(String cdata) {
+      return h2().__(cdata).__();
+    }
+
+    @Override
+    public H2<TH<T>> h2() {
+      closeAttrs();
+      return h2_(this, false);
+    }
+
+    @Override
+    public TH<T> h2(String selector, String cdata) {
+      return setSelector(h2(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H3<TH<T>> h3() {
+      closeAttrs();
+      return h3_(this, false);
+    }
+
+    @Override
+    public TH<T> h3(String cdata) {
+      return h3().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> h3(String selector, String cdata) {
+      return setSelector(h3(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H4<TH<T>> h4() {
+      closeAttrs();
+      return h4_(this, false);
+    }
+
+    @Override
+    public TH<T> h4(String cdata) {
+      return h4().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> h4(String selector, String cdata) {
+      return setSelector(h4(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H5<TH<T>> h5() {
+      closeAttrs();
+      return h5_(this, false);
+    }
+
+    @Override
+    public TH<T> h5(String cdata) {
+      return h5().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> h5(String selector, String cdata) {
+      return setSelector(h5(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H6<TH<T>> h6() {
+      closeAttrs();
+      return h6_(this, false);
+    }
+
+    @Override
+    public TH<T> h6(String cdata) {
+      return h6().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> h6(String selector, String cdata) {
+      return setSelector(h6(), selector).__(cdata).__();
+    }
+
+    @Override
+    public UL<TH<T>> ul() {
+      closeAttrs();
+      return ul_(this, false);
+    }
+
+    @Override
+    public UL<TH<T>> ul(String selector) {
+      return setSelector(ul(), selector);
+    }
+
+    @Override
+    public OL<TH<T>> ol() {
+      closeAttrs();
+      return ol_(this, false);
+    }
+
+    @Override
+    public OL<TH<T>> ol(String selector) {
+      return setSelector(ol(), selector);
+    }
+
+    @Override
+    public PRE<TH<T>> pre() {
+      closeAttrs();
+      return pre_(this, false);
+    }
+
+    @Override
+    public PRE<TH<T>> pre(String selector) {
+      return setSelector(pre(), selector);
+    }
+
+    @Override
+    public FORM<TH<T>> form() {
+      closeAttrs();
+      return form_(this, false);
+    }
+
+    @Override
+    public FORM<TH<T>> form(String selector) {
+      return setSelector(form(), selector);
+    }
+
+    @Override
+    public FIELDSET<TH<T>> fieldset() {
+      closeAttrs();
+      return fieldset_(this, false);
+    }
+
+    @Override
+    public FIELDSET<TH<T>> fieldset(String selector) {
+      return setSelector(fieldset(), selector);
+    }
+
+    @Override
+    public TH<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public TH<T> _r(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+
+    @Override
+    public B<TH<T>> b() {
+      closeAttrs();
+      return b_(this, true);
+    }
+
+    @Override
+    public TH<T> b(String cdata) {
+      return b().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> b(String selector, String cdata) {
+      return setSelector(b(), selector).__(cdata).__();
+    }
+
+    @Override
+    public I<TH<T>> i() {
+      closeAttrs();
+      return i_(this, true);
+    }
+
+    @Override
+    public TH<T> i(String cdata) {
+      return i().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> i(String selector, String cdata) {
+      return setSelector(i(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SMALL<TH<T>> small() {
+      closeAttrs();
+      return small_(this, true);
+    }
+
+    @Override
+    public TH<T> small(String cdata) {
+      return small().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> small(String selector, String cdata) {
+      return setSelector(small(), selector).__(cdata).__();
+    }
+
+    @Override
+    public TH<T> em(String cdata) {
+      return em().__(cdata).__();
+    }
+
+    @Override
+    public EM<TH<T>> em() {
+      closeAttrs();
+      return em_(this, true);
+    }
+
+    @Override
+    public TH<T> em(String selector, String cdata) {
+      return setSelector(em(), selector).__(cdata).__();
+    }
+
+    @Override
+    public STRONG<TH<T>> strong() {
+      closeAttrs();
+      return strong_(this, true);
+    }
+
+    @Override
+    public TH<T> strong(String cdata) {
+      return strong().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> strong(String selector, String cdata) {
+      return setSelector(strong(), selector).__(cdata).__();
+    }
+
+    @Override
+    public DFN<TH<T>> dfn() {
+      closeAttrs();
+      return dfn_(this, true);
+    }
+
+    @Override
+    public TH<T> dfn(String cdata) {
+      return dfn().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> dfn(String selector, String cdata) {
+      return setSelector(dfn(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CODE<TH<T>> code() {
+      closeAttrs();
+      return code_(this, true);
+    }
+
+    @Override
+    public TH<T> code(String cdata) {
+      return code().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> code(String selector, String cdata) {
+      return setSelector(code(), selector).__(cdata).__();
+    }
+
+    @Override
+    public TH<T> samp(String cdata) {
+      return samp().__(cdata).__();
+    }
+
+    @Override
+    public SAMP<TH<T>> samp() {
+      closeAttrs();
+      return samp_(this, true);
+    }
+
+    @Override
+    public TH<T> samp(String selector, String cdata) {
+      return setSelector(samp(), selector).__(cdata).__();
+    }
+
+    @Override
+    public KBD<TH<T>> kbd() {
+      closeAttrs();
+      return kbd_(this, true);
+    }
+
+    @Override
+    public TH<T> kbd(String cdata) {
+      return kbd().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> kbd(String selector, String cdata) {
+      return setSelector(kbd(), selector).__(cdata).__();
+    }
+
+    @Override
+    public VAR<TH<T>> var() {
+      closeAttrs();
+      return var_(this, true);
+    }
+
+    @Override
+    public TH<T> var(String cdata) {
+      return var().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> var(String selector, String cdata) {
+      return setSelector(var(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CITE<TH<T>> cite() {
+      closeAttrs();
+      return cite_(this, true);
+    }
+
+    @Override
+    public TH<T> cite(String cdata) {
+      return cite().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> cite(String selector, String cdata) {
+      return setSelector(cite(), selector).__(cdata).__();
+    }
+
+    @Override
+    public ABBR<TH<T>> abbr() {
+      closeAttrs();
+      return abbr_(this, true);
+    }
+
+    @Override
+    public TH<T> abbr(String cdata) {
+      return abbr().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> abbr(String selector, String cdata) {
+      return setSelector(abbr(), selector).__(cdata).__();
+    }
+
+    @Override
+    public A<TH<T>> a() {
+      closeAttrs();
+      return a_(this, true);
+    }
+
+    @Override
+    public A<TH<T>> a(String selector) {
+      return setSelector(a(), selector);
+    }
+
+    @Override
+    public TH<T> a(String href, String anchorText) {
+      return a().$href(href).__(anchorText).__();
+    }
+
+    @Override
+    public TH<T> a(String selector, String href, String anchorText) {
+      return setSelector(a(), selector).$href(href).__(anchorText).__();
+    }
+
+    @Override
+    public IMG<TH<T>> img() {
+      closeAttrs();
+      return img_(this, true);
+    }
+
+    @Override
+    public TH<T> img(String src) {
+      return img().$src(src).__();
+    }
+
+    @Override
+    public OBJECT<TH<T>> object() {
+      closeAttrs();
+      return object_(this, true);
+    }
+
+    @Override
+    public OBJECT<TH<T>> object(String selector) {
+      return setSelector(object(), selector);
+    }
+
+    @Override
+    public SUB<TH<T>> sub() {
+      closeAttrs();
+      return sub_(this, true);
+    }
+
+    @Override
+    public TH<T> sub(String cdata) {
+      return sub().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> sub(String selector, String cdata) {
+      return setSelector(sub(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SUP<TH<T>> sup() {
+      closeAttrs();
+      return sup_(this, true);
+    }
+
+    @Override
+    public TH<T> sup(String cdata) {
+      return sup().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> sup(String selector, String cdata) {
+      return setSelector(sup(), selector).__(cdata).__();
+    }
+
+    @Override
+    public MAP<TH<T>> map() {
+      closeAttrs();
+      return map_(this, true);
+    }
+
+    @Override
+    public MAP<TH<T>> map(String selector) {
+      return setSelector(map(), selector);
+    }
+
+    @Override
+    public TH<T> q(String cdata) {
+      return q().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> q(String selector, String cdata) {
+      return setSelector(q(), selector).__(cdata).__();
+    }
+
+    @Override
+    public Q<TH<T>> q() {
+      closeAttrs();
+      return q_(this, true);
+    }
+
+    @Override
+    public BR<TH<T>> br() {
+      closeAttrs();
+      return br_(this, true);
+    }
+
+    @Override
+    public TH<T> br(String selector) {
+      return setSelector(br(), selector).__();
+    }
+
+    @Override
+    public BDO<TH<T>> bdo() {
+      closeAttrs();
+      return bdo_(this, true);
+    }
+
+    @Override
+    public TH<T> bdo(Dir dir, String cdata) {
+      return bdo().$dir(dir).__(cdata).__();
+    }
+
+    @Override
+    public SPAN<TH<T>> span() {
+      closeAttrs();
+      return span_(this, true);
+    }
+
+    @Override
+    public TH<T> span(String cdata) {
+      return span().__(cdata).__();
+    }
+
+    @Override
+    public TH<T> span(String selector, String cdata) {
+      return setSelector(span(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SCRIPT<TH<T>> script() {
+      closeAttrs();
+      return script_(this, true);
+    }
+
+    @Override
+    public TH<T> script(String src) {
+      return setScriptSrc(script(), src).__();
+    }
+
+    @Override
+    public INS<TH<T>> ins() {
+      closeAttrs();
+      return ins_(this, true);
+    }
+
+    @Override
+    public TH<T> ins(String cdata) {
+      return ins().__(cdata).__();
+    }
+
+    @Override
+    public DEL<TH<T>> del() {
+      closeAttrs();
+      return del_(this, true);
+    }
+
+    @Override
+    public TH<T> del(String cdata) {
+      return del().__(cdata).__();
+    }
+
+    @Override
+    public LABEL<TH<T>> label() {
+      closeAttrs();
+      return label_(this, true);
+    }
+
+    @Override
+    public TH<T> label(String forId, String cdata) {
+      return label().$for(forId).__(cdata).__();
+    }
+
+    @Override
+    public INPUT<TH<T>> input(String selector) {
+      return setSelector(input(), selector);
+    }
+
+    @Override
+    public INPUT<TH<T>> input() {
+      closeAttrs();
+      return input_(this, true);
+    }
+
+    @Override
+    public SELECT<TH<T>> select() {
+      closeAttrs();
+      return select_(this, true);
+    }
+
+    @Override
+    public SELECT<TH<T>> select(String selector) {
+      return setSelector(select(), selector);
+    }
+
+    @Override
+    public TEXTAREA<TH<T>> textarea(String selector) {
+      return setSelector(textarea(), selector);
+    }
+
+    @Override
+    public TEXTAREA<TH<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public TH<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<TH<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<TH<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public TH<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  public class TR<T extends __> extends EImp<T> implements HamletSpec.TR {
+    public TR(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TR<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TH<TR<T>> th() {
+      closeAttrs();
+      return th_(this, false);
+    }
+
+    @Override
+    public TR<T> th(String cdata) {
+      return th().__(cdata).__();
+    }
+
+    @Override
+    public TR<T> th(String selector, String cdata) {
+      return setSelector(th(), selector).__(cdata).__();
+    }
+
+    public TR<T> th(String selector, String title, String cdata) {
+      return setSelector(th(), selector).$title(title).__(cdata).__();
+    }
+
+    @Override
+    public TD<TR<T>> td() {
+      closeAttrs();
+      return td_(this, false);
+    }
+
+    @Override
+    public TR<T> td(String cdata) {
+      return td().__(cdata).__();
+    }
+
+    @Override
+    public TR<T> td(String selector, String cdata) {
+      return setSelector(td(), selector).__(cdata).__();
+    }
+  }
+
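+  // Editorial note: besides the spec-mandated overloads, TR above defines a
+  // non-@Override th(selector, title, cdata) convenience that also sets the
+  // cell's title attribute. A one-line row sketch using only methods from TR
+  // (the variable tr is hypothetical):
+  //
+  //   tr.th("Name").th("State").td(".right", "42").__();
+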
+  private <T extends __> TH<T> th_(T e, boolean inline) {
+    return new TH<T>("th", e, opt(true, inline, false));
+  }
+
+  private <T extends __> TD<T> td_(T e, boolean inline) {
+    return new TD<T>("td", e, opt(true, inline, false));
+  }
+
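+  // Editorial note: COL below is a void, attribute-only element, so it
+  // exposes no child-element methods; COLGROUP additionally offers col() and
+  // col(selector). A sketch with methods from the classes in this file (the
+  // variable table is hypothetical):
+  //
+  //   table.colgroup().col().$span(2).__().__();
+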
+  public class COL<T extends __> extends EImp<T> implements HamletSpec.COL {
+    public COL(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public COL<T> $span(int value) {
+      addAttr("span", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+  }
+
+  public class COLGROUP<T extends __> extends EImp<T> implements HamletSpec.COLGROUP {
+    public COLGROUP(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public COLGROUP<T> $span(int value) {
+      addAttr("span", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public COLGROUP<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public COL<COLGROUP<T>> col() {
+      closeAttrs();
+      return col_(this, false);
+    }
+
+    @Override
+    public COLGROUP<T> col(String selector) {
+      return setSelector(col(), selector).__();
+    }
+  }
+
+  public class TBODY<T extends __> extends EImp<T> implements HamletSpec.TBODY {
+    public TBODY(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TBODY<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public TBODY<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TR<TBODY<T>> tr() {
+      closeAttrs();
+      return tr_(this, false);
+    }
+
+    @Override
+    public TR<TBODY<T>> tr(String selector) {
+      return setSelector(tr(), selector);
+    }
+  }
+
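+  // Editorial note: TFOOT and THEAD below are generated with exactly the
+  // same member set as TBODY above; the three row groups differ only in tag
+  // name, each exposing tr() and tr(selector) as its only children. Sketch
+  // (the variable tbody is hypothetical):
+  //
+  //   tbody.tr("#row1").td("a").td("b").__();
+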
+  public class TFOOT<T extends __> extends EImp<T> implements HamletSpec.TFOOT {
+    public TFOOT(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TFOOT<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public TFOOT<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TR<TFOOT<T>> tr() {
+      closeAttrs();
+      return tr_(this, false);
+    }
+
+    @Override
+    public TR<TFOOT<T>> tr(String selector) {
+      return setSelector(tr(), selector);
+    }
+  }
+
+  public class THEAD<T extends __> extends EImp<T> implements HamletSpec.THEAD {
+    public THEAD(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public THEAD<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public THEAD<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TR<THEAD<T>> tr() {
+      closeAttrs();
+      return tr_(this, false);
+    }
+
+    @Override
+    public TR<THEAD<T>> tr(String selector) {
+      return setSelector(tr(), selector);
+    }
+  }
+
+  public class CAPTION<T extends __> extends EImp<T> implements HamletSpec.CAPTION {
+    public CAPTION(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public CAPTION<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public CAPTION<T> _r(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+
+    @Override
+    public B<CAPTION<T>> b() {
+      closeAttrs();
+      return b_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> b(String cdata) {
+      return b().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> b(String selector, String cdata) {
+      return setSelector(b(), selector).__(cdata).__();
+    }
+
+    @Override
+    public I<CAPTION<T>> i() {
+      closeAttrs();
+      return i_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> i(String cdata) {
+      return i().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> i(String selector, String cdata) {
+      return setSelector(i(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SMALL<CAPTION<T>> small() {
+      closeAttrs();
+      return small_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> small(String cdata) {
+      return small().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> small(String selector, String cdata) {
+      return setSelector(small(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> em(String cdata) {
+      return em().__(cdata).__();
+    }
+
+    @Override
+    public EM<CAPTION<T>> em() {
+      closeAttrs();
+      return em_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> em(String selector, String cdata) {
+      return setSelector(em(), selector).__(cdata).__();
+    }
+
+    @Override
+    public STRONG<CAPTION<T>> strong() {
+      closeAttrs();
+      return strong_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> strong(String cdata) {
+      return strong().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> strong(String selector, String cdata) {
+      return setSelector(strong(), selector).__(cdata).__();
+    }
+
+    @Override
+    public DFN<CAPTION<T>> dfn() {
+      closeAttrs();
+      return dfn_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> dfn(String cdata) {
+      return dfn().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> dfn(String selector, String cdata) {
+      return setSelector(dfn(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CODE<CAPTION<T>> code() {
+      closeAttrs();
+      return code_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> code(String cdata) {
+      return code().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> code(String selector, String cdata) {
+      return setSelector(code(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> samp(String cdata) {
+      return samp().__(cdata).__();
+    }
+
+    @Override
+    public SAMP<CAPTION<T>> samp() {
+      closeAttrs();
+      return samp_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> samp(String selector, String cdata) {
+      return setSelector(samp(), selector).__(cdata).__();
+    }
+
+    @Override
+    public KBD<CAPTION<T>> kbd() {
+      closeAttrs();
+      return kbd_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> kbd(String cdata) {
+      return kbd().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> kbd(String selector, String cdata) {
+      return setSelector(kbd(), selector).__(cdata).__();
+    }
+
+    @Override
+    public VAR<CAPTION<T>> var() {
+      closeAttrs();
+      return var_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> var(String cdata) {
+      return var().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> var(String selector, String cdata) {
+      return setSelector(var(), selector).__(cdata).__();
+    }
+
+    @Override
+    public CITE<CAPTION<T>> cite() {
+      closeAttrs();
+      return cite_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> cite(String cdata) {
+      return cite().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> cite(String selector, String cdata) {
+      return setSelector(cite(), selector).__(cdata).__();
+    }
+
+    @Override
+    public ABBR<CAPTION<T>> abbr() {
+      closeAttrs();
+      return abbr_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> abbr(String cdata) {
+      return abbr().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> abbr(String selector, String cdata) {
+      return setSelector(abbr(), selector).__(cdata).__();
+    }
+
+    @Override
+    public A<CAPTION<T>> a() {
+      closeAttrs();
+      return a_(this, true);
+    }
+
+    @Override
+    public A<CAPTION<T>> a(String selector) {
+      return setSelector(a(), selector);
+    }
+
+    @Override
+    public CAPTION<T> a(String href, String anchorText) {
+      return a().$href(href).__(anchorText).__();
+    }
+
+    @Override
+    public CAPTION<T> a(String selector, String href, String anchorText) {
+      return setSelector(a(), selector).$href(href).__(anchorText).__();
+    }
+
+    @Override
+    public IMG<CAPTION<T>> img() {
+      closeAttrs();
+      return img_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> img(String src) {
+      return img().$src(src).__();
+    }
+
+    @Override
+    public OBJECT<CAPTION<T>> object() {
+      closeAttrs();
+      return object_(this, true);
+    }
+
+    @Override
+    public OBJECT<CAPTION<T>> object(String selector) {
+      return setSelector(object(), selector);
+    }
+
+    @Override
+    public SUB<CAPTION<T>> sub() {
+      closeAttrs();
+      return sub_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> sub(String cdata) {
+      return sub().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> sub(String selector, String cdata) {
+      return setSelector(sub(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SUP<CAPTION<T>> sup() {
+      closeAttrs();
+      return sup_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> sup(String cdata) {
+      return sup().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> sup(String selector, String cdata) {
+      return setSelector(sup(), selector).__(cdata).__();
+    }
+
+    @Override
+    public MAP<CAPTION<T>> map() {
+      closeAttrs();
+      return map_(this, true);
+    }
+
+    @Override
+    public MAP<CAPTION<T>> map(String selector) {
+      return setSelector(map(), selector);
+    }
+
+    @Override
+    public CAPTION<T> q(String cdata) {
+      return q().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> q(String selector, String cdata) {
+      return setSelector(q(), selector).__(cdata).__();
+    }
+
+    @Override
+    public Q<CAPTION<T>> q() {
+      closeAttrs();
+      return q_(this, true);
+    }
+
+    @Override
+    public BR<CAPTION<T>> br() {
+      closeAttrs();
+      return br_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> br(String selector) {
+      return setSelector(br(), selector).__();
+    }
+
+    @Override
+    public BDO<CAPTION<T>> bdo() {
+      closeAttrs();
+      return bdo_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> bdo(Dir dir, String cdata) {
+      return bdo().$dir(dir).__(cdata).__();
+    }
+
+    @Override
+    public SPAN<CAPTION<T>> span() {
+      closeAttrs();
+      return span_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> span(String cdata) {
+      return span().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<T> span(String selector, String cdata) {
+      return setSelector(span(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SCRIPT<CAPTION<T>> script() {
+      closeAttrs();
+      return script_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> script(String src) {
+      return setScriptSrc(script(), src).__();
+    }
+
+    @Override
+    public INS<CAPTION<T>> ins() {
+      closeAttrs();
+      return ins_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> ins(String cdata) {
+      return ins().__(cdata).__();
+    }
+
+    @Override
+    public DEL<CAPTION<T>> del() {
+      closeAttrs();
+      return del_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> del(String cdata) {
+      return del().__(cdata).__();
+    }
+
+    @Override
+    public LABEL<CAPTION<T>> label() {
+      closeAttrs();
+      return label_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> label(String forId, String cdata) {
+      return label().$for(forId).__(cdata).__();
+    }
+
+    @Override
+    public INPUT<CAPTION<T>> input(String selector) {
+      return setSelector(input(), selector);
+    }
+
+    @Override
+    public INPUT<CAPTION<T>> input() {
+      closeAttrs();
+      return input_(this, true);
+    }
+
+    @Override
+    public SELECT<CAPTION<T>> select() {
+      closeAttrs();
+      return select_(this, true);
+    }
+
+    @Override
+    public SELECT<CAPTION<T>> select(String selector) {
+      return setSelector(select(), selector);
+    }
+
+    @Override
+    public TEXTAREA<CAPTION<T>> textarea(String selector) {
+      return setSelector(textarea(), selector);
+    }
+
+    @Override
+    public TEXTAREA<CAPTION<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public CAPTION<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<CAPTION<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<CAPTION<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public CAPTION<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  public class TABLE<T extends __> extends EImp<T> implements HamletSpec.TABLE {
+    public TABLE(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TABLE<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<T> caption(String cdata) {
+      return caption().__(cdata).__();
+    }
+
+    @Override
+    public CAPTION<TABLE<T>> caption() {
+      closeAttrs();
+      return caption_(this, false);
+    }
+
+    @Override
+    public COLGROUP<TABLE<T>> colgroup() {
+      closeAttrs();
+      return colgroup_(this, false);
+    }
+
+    @Override
+    public THEAD<TABLE<T>> thead(String selector) {
+      return setSelector(thead(), selector);
+    }
+
+    @Override
+    public THEAD<TABLE<T>> thead() {
+      closeAttrs();
+      return thead_(this, false);
+    }
+
+    @Override
+    public TFOOT<TABLE<T>> tfoot() {
+      closeAttrs();
+      return tfoot_(this, false);
+    }
+
+    @Override
+    public TFOOT<TABLE<T>> tfoot(String selector) {
+      return setSelector(tfoot(), selector);
+    }
+
+    @Override
+    public TBODY<TABLE<T>> tbody() {
+      closeAttrs();
+      return tbody_(this, false);
+    }
+
+    @Override
+    public TBODY<TABLE<T>> tbody(String selector) {
+      return setSelector(tbody(), selector);
+    }
+
+    @Override
+    public TR<TABLE<T>> tr() {
+      closeAttrs();
+      return tr_(this, false);
+    }
+
+    @Override
+    public TR<TABLE<T>> tr(String selector) {
+      return setSelector(tr(), selector);
+    }
+
+    @Override
+    public COL<TABLE<T>> col() {
+      closeAttrs();
+      return col_(this, false);
+    }
+
+    @Override
+    public TABLE<T> col(String selector) {
+      return setSelector(col(), selector).__();
+    }
+  }
+
+  private <T extends __> CAPTION<T> caption_(T e, boolean inline) {
+    return new CAPTION<T>("caption", e, opt(true, inline, false)); }
+
+  private <T extends __> COLGROUP<T> colgroup_(T e, boolean inline) {
+    return new COLGROUP<T>("colgroup", e, opt(false, inline, false)); }
+
+  private <T extends __> THEAD<T> thead_(T e, boolean inline) {
+    return new THEAD<T>("thead", e, opt(true, inline, false)); }
+
+  private <T extends __> TFOOT<T> tfoot_(T e, boolean inline) {
+    return new TFOOT<T>("tfoot", e, opt(true, inline, false)); }
+
+  private <T extends __> TBODY<T> tbody_(T e, boolean inline) {
+    return new TBODY<T>("tbody", e, opt(true, inline, false)); }
+
+  private <T extends __> COL<T> col_(T e, boolean inline) {
+    return new COL<T>("col", e, opt(false, inline, false)); }
+
+  private <T extends __> TR<T> tr_(T e, boolean inline) {
+    return new TR<T>("tr", e, opt(true, inline, false)); }
+
+  public class BUTTON<T extends __> extends EImp<T> implements HamletSpec.BUTTON {
+    public BUTTON(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public BUTTON<T> $type(ButtonType value) {
+      addAttr("type", value);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> $name(String value) {
+      addAttr("name", value);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> $value(String value) {
+      addAttr("value", value);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> $disabled() {
+      addAttr("disabled", null);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> $tabindex(int value) {
+      addAttr("tabindex", value);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> $accesskey(String value) {
+      addAttr("accesskey", value);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> $onfocus(String value) {
+      addAttr("onfocus", value);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> $onblur(String value) {
+      addAttr("onblur", value);
+      return this;
+    }
+
+    @Override
+    public TABLE<BUTTON<T>> table() {
+      closeAttrs();
+      return table_(this, false);
+    }
+
+    @Override
+    public TABLE<BUTTON<T>> table(String selector) {
+      return setSelector(table(), selector);
+    }
+
+    @Override
+    public BUTTON<T> address(String cdata) {
+      return address().__(cdata).__();
+    }
+
+    @Override
+    public ADDRESS<BUTTON<T>> address() {
+      closeAttrs();
+      return address_(this, false);
+    }
+
+    @Override
+    public P<BUTTON<T>> p(String selector) {
+      return setSelector(p(), selector);
+    }
+
+    @Override
+    public P<BUTTON<T>> p() {
+      closeAttrs();
+      return p_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> __(Class<? extends SubView> cls) {
+      _v(cls);
+      return this;
+    }
+
+    @Override
+    public HR<BUTTON<T>> hr() {
+      closeAttrs();
+      return hr_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> hr(String selector) {
+      return setSelector(hr(), selector).__();
+    }
+
+    @Override
+    public DL<BUTTON<T>> dl(String selector) {
+      return setSelector(dl(), selector);
+    }
+
+    @Override
+    public DL<BUTTON<T>> dl() {
+      closeAttrs();
+      return dl_(this, false);
+    }
+
+    @Override
+    public DIV<BUTTON<T>> div(String selector) {
+      return setSelector(div(), selector);
+    }
+
+    @Override
+    public DIV<BUTTON<T>> div() {
+      closeAttrs();
+      return div_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<BUTTON<T>> blockquote() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public BLOCKQUOTE<BUTTON<T>> bq() {
+      closeAttrs();
+      return blockquote_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> h1(String cdata) {
+      return h1().__(cdata).__();
+    }
+
+    @Override
+    public H1<BUTTON<T>> h1() {
+      closeAttrs();
+      return h1_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> h1(String selector, String cdata) {
+      return setSelector(h1(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> h2(String cdata) {
+      return h2().__(cdata).__();
+    }
+
+    @Override
+    public H2<BUTTON<T>> h2() {
+      closeAttrs();
+      return h2_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> h2(String selector, String cdata) {
+      return setSelector(h2(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H3<BUTTON<T>> h3() {
+      closeAttrs();
+      return h3_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> h3(String cdata) {
+      return h3().__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> h3(String selector, String cdata) {
+      return setSelector(h3(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H4<BUTTON<T>> h4() {
+      closeAttrs();
+      return h4_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> h4(String cdata) {
+      return h4().__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> h4(String selector, String cdata) {
+      return setSelector(h4(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H5<BUTTON<T>> h5() {
+      closeAttrs();
+      return h5_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> h5(String cdata) {
+      return h5().__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> h5(String selector, String cdata) {
+      return setSelector(h5(), selector).__(cdata).__();
+    }
+
+    @Override
+    public H6<BUTTON<T>> h6() {
+      closeAttrs();
+      return h6_(this, false);
+    }
+
+    @Override
+    public BUTTON<T> h6(String cdata) {
+      return h6().__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> h6(String selector, String cdata) {
+      return setSelector(h6(), selector).__(cdata).__();
+    }
+
+    @Override
+    public UL<BUTTON<T>> ul() {
+      closeAttrs();
+      return ul_(this, false);
+    }
+
+    @Override
+    public UL<BUTTON<T>> ul(String selector) {
+      return setSelector(ul(), selector);
+    }
+
+    @Override
+    public OL<BUTTON<T>> ol() {
+      closeAttrs();
+      return ol_(this, false);
+    }
+
+    @Override
+    public OL<BUTTON<T>> ol(String selector) {
+      return setSelector(ol(), selector);
+    }
+
+    @Override
+    public PRE<BUTTON<T>> pre() {
+      closeAttrs();
+      return pre_(this, false);
+    }
+
+    @Override
+    public PRE<BUTTON<T>> pre(String selector) {
+      return setSelector(pre(), selector);
+    }
+
+    @Override
+    public BUTTON<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public BUTTON<T> _r(Object... lines) {
+      _p(false, lines);
+      return this;
+    }
+
+    @Override
+    public B<BUTTON<T>> b() {
+      closeAttrs();
+      return b_(this, true);
+    }
+
+    @Override
+    public BUTTON<T> b(String cdata) {
+      return b().__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> b(String selector, String cdata) {
+      return setSelector(b(), selector).__(cdata).__();
+    }
+
+    @Override
+    public I<BUTTON<T>> i() {
+      closeAttrs();
+      return i_(this, true);
+    }
+
+    @Override
+    public BUTTON<T> i(String cdata) {
+      return i().__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> i(String selector, String cdata) {
+      return setSelector(i(), selector).__(cdata).__();
+    }
+
+    @Override
+    public SMALL<BUTTON<T>> small() {
+      closeAttrs();
+      return small_(this, true);
+    }
+
+    @Override
+    public BUTTON<T> small(String cdata) {
+      return small().__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<T> small(String selector, String cdata) {
+      return setSelector(small(), selector).__(cdata).__();
+    }

<TRUNCATED>
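
For orientation, a minimal usage sketch of the fluent API generated above. The
element and attribute methods are the ones shown in this diff, but the `html`
variable, the table id, the caption, and the cell values are invented for
illustration and are not part of the commit:

    // Each call opens an element; __() closes the innermost open element.
    html.
      table("#metrics").
        caption("Cluster metrics").
        thead().
          tr().
            th("Metric").
            th("Value").__().__().
        tbody().
          tr().
            td("Applications").
            td("42").__().__().__();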



[04/50] [abbrv] hadoop git commit: HDFS-12193. Fix style issues in HttpFS tests. Contributed by Zoran Dimitrijevic



Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c98201b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c98201b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c98201b5

Branch: refs/heads/YARN-5734
Commit: c98201b5d83a700b4d081000065c6fd1a6ef2eed
Parents: 94ca52a
Author: Ravi Prakash <ra...@apache.org>
Authored: Mon Jul 24 19:06:15 2017 -0700
Committer: Ravi Prakash <ra...@apache.org>
Committed: Mon Jul 24 19:06:15 2017 -0700

----------------------------------------------------------------------
 .../hadoop/fs/http/server/TestHttpFSServer.java | 106 +++++++++++--------
 .../fs/http/server/TestHttpFSServerNoACLs.java  |  15 +--
 .../http/server/TestHttpFSServerNoXAttrs.java   |  10 +-
 3 files changed, 77 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c98201b5/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 7cdb39c..0e1cc20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -73,6 +73,9 @@ import com.google.common.collect.Maps;
 import java.util.Properties;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 
+/**
+ * Main test class for HttpFSServer.
+ */
 public class TestHttpFSServer extends HFSTestCase {
 
   @Test
@@ -82,15 +85,20 @@ public class TestHttpFSServer extends HFSTestCase {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
 
     Configuration httpfsConf = new Configuration(false);
-    HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
+    HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir,
+                                                       httpfsConf);
     server.init();
     server.destroy();
   }
 
-  public static class MockGroups implements Service,Groups {
+  /**
+   * Mock groups.
+   */
+  public static class MockGroups implements Service, Groups {
 
     @Override
-    public void init(org.apache.hadoop.lib.server.Server server) throws ServiceException {
+    public void init(org.apache.hadoop.lib.server.Server server)
+        throws ServiceException {
     }
 
     @Override
@@ -112,8 +120,10 @@ public class TestHttpFSServer extends HFSTestCase {
     }
 
     @Override
-    public void serverStatusChange(org.apache.hadoop.lib.server.Server.Status oldStatus,
-                                   org.apache.hadoop.lib.server.Server.Status newStatus) throws ServiceException {
+    public void serverStatusChange(
+        org.apache.hadoop.lib.server.Server.Status oldStatus,
+        org.apache.hadoop.lib.server.Server.Status newStatus)
+        throws ServiceException {
     }
 
     @Override
@@ -300,25 +310,30 @@ public class TestHttpFSServer extends HFSTestCase {
     createHttpFSServer(false, false);
 
     URL url = new URL(TestJettyHelper.getJettyURL(),
-                      MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
+        MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
+                             "nobody"));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
+    Assert.assertEquals(conn.getResponseCode(),
+                        HttpURLConnection.HTTP_UNAUTHORIZED);
 
     url = new URL(TestJettyHelper.getJettyURL(),
-                  MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
-                                       HadoopUsersConfTestHelper.getHadoopUsers()[0]));
+        MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
+                             HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
     Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
-    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    BufferedReader reader = new BufferedReader(
+        new InputStreamReader(conn.getInputStream()));
     String line = reader.readLine();
     reader.close();
     Assert.assertTrue(line.contains("\"counters\":{"));
 
     url = new URL(TestJettyHelper.getJettyURL(),
-                  MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
-                                       HadoopUsersConfTestHelper.getHadoopUsers()[0]));
+        MessageFormat.format(
+            "/webhdfs/v1/foo?user.name={0}&op=instrumentation",
+            HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    Assert.assertEquals(conn.getResponseCode(),
+                        HttpURLConnection.HTTP_BAD_REQUEST);
   }
 
   @Test
@@ -330,10 +345,12 @@ public class TestHttpFSServer extends HFSTestCase {
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
-                      MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
+        MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus",
+                             user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
-    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    BufferedReader reader = new BufferedReader(
+        new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
   }
@@ -369,10 +386,12 @@ public class TestHttpFSServer extends HFSTestCase {
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
-                      MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
+        MessageFormat.format(
+            "/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
-    BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    BufferedReader reader = new BufferedReader(
+        new InputStreamReader(conn.getInputStream()));
     reader.readLine();
     reader.close();
   }
@@ -384,15 +403,14 @@ public class TestHttpFSServer extends HFSTestCase {
    * @param perms The permission field, if any (may be null)
    * @throws Exception
    */
-  private void createWithHttp ( String filename, String perms )
-          throws Exception {
+  private void createWithHttp(String filename, String perms) throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
-    if ( filename.charAt(0) == '/' ) {
+    if (filename.charAt(0) == '/') {
       filename = filename.substring(1);
     }
     String pathOps;
-    if ( perms == null ) {
+    if (perms == null) {
       pathOps = MessageFormat.format(
               "/webhdfs/v1/{0}?user.name={1}&op=CREATE",
               filename, user);
@@ -422,7 +440,7 @@ public class TestHttpFSServer extends HFSTestCase {
           throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
-    if ( filename.charAt(0) == '/' ) {
+    if (filename.charAt(0) == '/') {
       filename = filename.substring(1);
     }
     String pathOps = MessageFormat.format(
@@ -449,7 +467,7 @@ public class TestHttpFSServer extends HFSTestCase {
                       String params) throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
-    if ( filename.charAt(0) == '/' ) {
+    if (filename.charAt(0) == '/') {
       filename = filename.substring(1);
     }
     String pathOps = MessageFormat.format(
@@ -471,7 +489,7 @@ public class TestHttpFSServer extends HFSTestCase {
    * @return The value of 'permission' in statusJson
    * @throws Exception
    */
-  private String getPerms ( String statusJson ) throws Exception {
+  private String getPerms(String statusJson) throws Exception {
     JSONParser parser = new JSONParser();
     JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
     JSONObject details = (JSONObject) jsonObject.get("FileStatus");
@@ -499,20 +517,20 @@ public class TestHttpFSServer extends HFSTestCase {
    * @return A List of Strings which are the elements of the ACL entries
    * @throws Exception
    */
-  private List<String> getAclEntries ( String statusJson ) throws Exception {
+  private List<String> getAclEntries(String statusJson) throws Exception {
     List<String> entries = new ArrayList<String>();
     JSONParser parser = new JSONParser();
     JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
     JSONObject details = (JSONObject) jsonObject.get("AclStatus");
     JSONArray jsonEntries = (JSONArray) details.get("entries");
-    if ( jsonEntries != null ) {
+    if (jsonEntries != null) {
       for (Object e : jsonEntries) {
         entries.add(e.toString());
       }
     }
     return entries;
   }
-  
+
   /**
    * Parse xAttrs from JSON result of GETXATTRS call, return xAttrs Map.
    * @param statusJson JSON from GETXATTRS
@@ -533,8 +551,8 @@ public class TestHttpFSServer extends HFSTestCase {
     }
     return xAttrs;
   }
-  
-  /** Decode xattr value from string */
+
+  /** Decode xattr value from string. */
   private byte[] decodeXAttrValue(String value) throws IOException {
     if (value != null) {
       return XAttrCodec.decodeValue(value);
@@ -574,7 +592,7 @@ public class TestHttpFSServer extends HFSTestCase {
     statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
     Assert.assertTrue("321".equals(getPerms(statusJson)));
   }
-  
+
   /**
    * Validate XAttr get/set/remove calls.
    */
@@ -594,12 +612,12 @@ public class TestHttpFSServer extends HFSTestCase {
 
     FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
     fs.mkdirs(new Path(dir));
-    
-    createWithHttp(path,null);
+
+    createWithHttp(path, null);
     String statusJson = getStatus(path, "GETXATTRS");
     Map<String, byte[]> xAttrs = getXAttrs(statusJson);
     Assert.assertEquals(0, xAttrs.size());
-    
+
     // Set two xattrs
     putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
     putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
@@ -608,25 +626,26 @@ public class TestHttpFSServer extends HFSTestCase {
     Assert.assertEquals(2, xAttrs.size());
     Assert.assertArrayEquals(value1, xAttrs.get(name1));
     Assert.assertArrayEquals(value2, xAttrs.get(name2));
-    
+
     // Remove one xattr
     putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
     statusJson = getStatus(path, "GETXATTRS");
     xAttrs = getXAttrs(statusJson);
     Assert.assertEquals(1, xAttrs.size());
     Assert.assertArrayEquals(value2, xAttrs.get(name2));
-    
+
     // Remove another xattr, then there is no xattr
     putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
     statusJson = getStatus(path, "GETXATTRS");
     xAttrs = getXAttrs(statusJson);
     Assert.assertEquals(0, xAttrs.size());
   }
-  
-  /** Params for setting an xAttr */
-  public static String setXAttrParam(String name, byte[] value) throws IOException {
+
+  /** Params for setting an xAttr. */
+  public static String setXAttrParam(String name, byte[] value)
+      throws IOException {
     return "xattr.name=" + name + "&xattr.value=" + XAttrCodec.encodeValue(
-        value, XAttrCodec.HEX) + "&encoding=hex&flag=create"; 
+        value, XAttrCodec.HEX) + "&encoding=hex&flag=create";
   }
 
   /**
@@ -791,7 +810,9 @@ public class TestHttpFSServer extends HFSTestCase {
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
-                      MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
+        MessageFormat.format(
+            "/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2",
+            user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
     InputStream is = conn.getInputStream();
@@ -809,12 +830,13 @@ public class TestHttpFSServer extends HFSTestCase {
 
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     URL url = new URL(TestJettyHelper.getJettyURL(),
-                      MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
+        MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     conn.setDoInput(true);
     conn.setDoOutput(true);
     conn.setRequestMethod("PUT");
-    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
+    Assert.assertEquals(conn.getResponseCode(),
+        HttpURLConnection.HTTP_BAD_REQUEST);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c98201b5/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
index 289ddc4..c679dba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
@@ -110,12 +110,12 @@ public class TestHttpFSServerNoACLs extends HTestCase {
 
     // HDFS configuration
     File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
-    if ( !hadoopConfDir.mkdirs() ) {
+    if (!hadoopConfDir.mkdirs()) {
       throw new IOException();
     }
 
     String fsDefaultName =
-            nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
+        nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
 
@@ -146,7 +146,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
 
     ClassLoader cl = Thread.currentThread().getContextClassLoader();
     URL url = cl.getResource("webapp");
-    if ( url == null ) {
+    if (url == null) {
       throw new IOException();
     }
     WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
@@ -168,7 +168,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
           throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
-    if ( filename.charAt(0) == '/' ) {
+    if (filename.charAt(0) == '/') {
       filename = filename.substring(1);
     }
     String pathOps = MessageFormat.format(
@@ -179,7 +179,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
     conn.connect();
     int resp = conn.getResponseCode();
     BufferedReader reader;
-    if ( expectOK ) {
+    if (expectOK) {
       Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
       reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
       String res = reader.readLine();
@@ -204,7 +204,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
                       String params, boolean expectOK) throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
-    if ( filename.charAt(0) == '/' ) {
+    if (filename.charAt(0) == '/') {
       filename = filename.substring(1);
     }
     String pathOps = MessageFormat.format(
@@ -216,7 +216,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
     conn.setRequestMethod("PUT");
     conn.connect();
     int resp = conn.getResponseCode();
-    if ( expectOK ) {
+    if (expectOK) {
       Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
     } else {
       Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
@@ -229,6 +229,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
   }
 
   /**
+   * Test without ACLs.
    * Ensure that
    * <ol>
    *   <li>GETFILESTATUS and LISTSTATUS work happily</li>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c98201b5/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
index 7571125..270989b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
@@ -111,7 +111,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
 
     // HDFS configuration
     File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
-    if ( !hadoopConfDir.mkdirs() ) {
+    if (!hadoopConfDir.mkdirs()) {
       throw new IOException();
     }
 
@@ -147,7 +147,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
 
     ClassLoader cl = Thread.currentThread().getContextClassLoader();
     URL url = cl.getResource("webapp");
-    if ( url == null ) {
+    if (url == null) {
       throw new IOException();
     }
     WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
@@ -168,7 +168,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
           throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
-    if ( filename.charAt(0) == '/' ) {
+    if (filename.charAt(0) == '/') {
       filename = filename.substring(1);
     }
     String pathOps = MessageFormat.format(
@@ -197,7 +197,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
                       String params) throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
-    if ( filename.charAt(0) == '/' ) {
+    if (filename.charAt(0) == '/') {
       filename = filename.substring(1);
     }
     String pathOps = MessageFormat.format(
@@ -245,4 +245,4 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
     putCmd(path, "SETXATTR", TestHttpFSServer.setXAttrParam(name1, value1));
     putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
   }
-}
\ No newline at end of file
+}
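
As a stand-alone illustration of the REST calls these tests drive, a
hypothetical snippet (host, port, user, and path are invented; the URL pattern
is taken from the test helpers above, and 14000 is the usual HttpFS default
port):

    import java.net.HttpURLConnection;
    import java.net.URL;

    URL url = new URL("http://httpfs-host:14000/webhdfs/v1/tmp/foo"
        + "?user.name=alice&op=GETFILESTATUS");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // 200 with a JSON FileStatus body on success; getPerms() above shows
    // how the "permission" field is pulled out of that JSON.
    System.out.println(conn.getResponseCode());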




[35/50] [abbrv] hadoop git commit: HADOOP-14229. hadoop.security.auth_to_local example is incorrect in the documentation. Contributed by Andras Bokor.



Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/746189ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/746189ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/746189ad

Branch: refs/heads/YARN-5734
Commit: 746189ad8cdf90ab35baec9364b2e02956a1e70c
Parents: 480c8db
Author: Ravi Prakash <ra...@altiscale.com>
Authored: Fri Jul 28 11:43:36 2017 -0700
Committer: Ravi Prakash <ra...@altiscale.com>
Committed: Fri Jul 28 11:43:36 2017 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/SecureMode.md           | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/746189ad/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
index e1aad5a..5a62c4f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
@@ -142,12 +142,9 @@ In a typical cluster HDFS and YARN services will be launched as the system `hdfs
     <property>
       <name>hadoop.security.auth_to_local</name>
       <value>
-        RULE:[2:$1@$0](nn/.*@.*REALM.TLD)s/.*/hdfs/
-        RULE:[2:$1@$0](jn/.*@.*REALM.TLD)s/.*/hdfs/
-        RULE:[2:$1@$0](dn/.*@.*REALM.TLD)s/.*/hdfs/
-        RULE:[2:$1@$0](nm/.*@.*REALM.TLD)s/.*/yarn/
-        RULE:[2:$1@$0](rm/.*@.*REALM.TLD)s/.*/yarn/
-        RULE:[2:$1@$0](jhs/.*@.*REALM.TLD)s/.*/mapred/
+        RULE:[2:$1/$2@$0]([ndj]n/.*@REALM.TLD)s/.*/hdfs/
+        RULE:[2:$1/$2@$0]([rn]m/.*@REALM.TLD)s/.*/yarn/
+        RULE:[2:$1/$2@$0](jhs/.*@REALM.TLD)s/.*/mapred/
         DEFAULT
       </value>
     </property>
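
A worked example of how the corrected rules map principals (host names are
invented; REALM.TLD is assumed to be the default realm):

    nn/nn1.example.com@REALM.TLD  -> [2:$1/$2@$0] rebuilds "nn/nn1.example.com@REALM.TLD",
                                     which matches ([ndj]n/.*@REALM.TLD); s/.*/hdfs/ -> hdfs
    rm/rm1.example.com@REALM.TLD  -> yarn
    jhs/jh1.example.com@REALM.TLD -> mapred
    alice@REALM.TLD               -> falls through to DEFAULT -> alice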




[05/50] [abbrv] hadoop git commit: HADOOP-14518. Customize User-Agent header sent in HTTP/HTTPS requests by WASB. Contributed by Georgi Chalakov.



Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2921e51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2921e51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2921e51

Branch: refs/heads/YARN-5734
Commit: f2921e51f0fe613abce0a9f415a0d8ab6144aa6e
Parents: c98201b
Author: Jitendra Pandey <ji...@apache.org>
Authored: Mon Jul 24 13:59:27 2017 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Mon Jul 24 23:01:01 2017 -0700

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml         |  10 +-
 .../conf/TestCommonConfigurationFields.java     |   1 +
 .../fs/azure/AzureNativeFileSystemStore.java    | 144 +++++++++++--------
 .../hadoop-azure/src/site/markdown/index.md     |  13 ++
 .../fs/azure/TestWasbUriAndConfiguration.java   |  48 +++++++
 .../src/test/resources/azure-test.xml           |   5 +
 6 files changed, 162 insertions(+), 59 deletions(-)
----------------------------------------------------------------------
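
For context, a minimal sketch of how a deployment might use the new knob (the
prefix value is invented; the header layout mirrors the String.format() call
added to getInstrumentedContext() in the diff below):

    <property>
      <name>fs.azure.user.agent.prefix</name>
      <value>MyApp/1.0</value>
    </property>

    resulting header, illustratively (exact versions vary by deployment):
    User-Agent: WASB/3.0.0-SNAPSHOT (MyApp/1.0) Azure-Storage/<sdk version> (JavaJRE <jre version>; ...)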


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 68b0a9d..d5ddc7f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -499,7 +499,15 @@
     name to use for the service when the client wishes to make an RPC call.
   </description>
 </property>
-
+  <property>
+    <name>fs.azure.user.agent.prefix</name>
+    <value>unknown</value>
+    <description>
+      WASB passes User-Agent header to the Azure back-end. The default value
+      contains WASB version, Java Runtime version, Azure Client library version,
+      and the value of the configuration option fs.azure.user.agent.prefix.
+    </description>
+  </property>
 
 <property>
     <name>hadoop.security.uid.cache.secs</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 593254eb..ef74cba 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -116,6 +116,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     xmlPropsToSkipCompare.add("fs.azure.secure.mode");
     xmlPropsToSkipCompare.add("fs.azure.authorization");
     xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
+    xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix");
 
     // Deprecated properties.  These should eventually be removed from the
     // class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 6b6f07a..7c198af 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.azure.metrics.ResponseReceivedMetricUpdater;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.VersionInfo;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,6 +72,10 @@ import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
 import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
 import com.microsoft.azure.storage.StorageErrorCode;
 import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.Constants;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.core.BaseRequest;
+import com.microsoft.azure.storage.SendingRequestEvent;
 import com.microsoft.azure.storage.blob.BlobListingDetails;
 import com.microsoft.azure.storage.blob.BlobProperties;
 import com.microsoft.azure.storage.blob.BlobRequestOptions;
@@ -83,13 +88,13 @@ import com.microsoft.azure.storage.core.Utility;
 
 /**
  * Core implementation of Windows Azure Filesystem for Hadoop.
- * Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage 
+ * Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage
  *
  */
 @InterfaceAudience.Private
 @VisibleForTesting
 public class AzureNativeFileSystemStore implements NativeFileSystemStore {
-  
+
   /**
    * Configuration knob on whether we do block-level MD5 validation on
    * upload/download.
@@ -102,6 +107,12 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   static final String DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME = "storageemulator";
   static final String STORAGE_EMULATOR_ACCOUNT_NAME_PROPERTY_NAME = "fs.azure.storage.emulator.account.name";
 
+  /**
+   * Configuration for User-Agent field.
+   */
+  static final String USER_AGENT_ID_KEY = "fs.azure.user.agent.prefix";
+  static final String USER_AGENT_ID_DEFAULT = "unknown";
+
   public static final Logger LOG = LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
 
   private StorageInterface storageInteractionLayer;
@@ -133,15 +144,15 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private static final String KEY_MAX_BACKOFF_INTERVAL = "fs.azure.io.retry.max.backoff.interval";
   private static final String KEY_BACKOFF_INTERVAL = "fs.azure.io.retry.backoff.interval";
   private static final String KEY_MAX_IO_RETRIES = "fs.azure.io.retry.max.retries";
-  
-  private static final String KEY_COPYBLOB_MIN_BACKOFF_INTERVAL = 
+
+  private static final String KEY_COPYBLOB_MIN_BACKOFF_INTERVAL =
     "fs.azure.io.copyblob.retry.min.backoff.interval";
-  private static final String KEY_COPYBLOB_MAX_BACKOFF_INTERVAL = 
+  private static final String KEY_COPYBLOB_MAX_BACKOFF_INTERVAL =
     "fs.azure.io.copyblob.retry.max.backoff.interval";
-  private static final String KEY_COPYBLOB_BACKOFF_INTERVAL = 
+  private static final String KEY_COPYBLOB_BACKOFF_INTERVAL =
     "fs.azure.io.copyblob.retry.backoff.interval";
-  private static final String KEY_COPYBLOB_MAX_IO_RETRIES = 
-    "fs.azure.io.copyblob.retry.max.retries";  
+  private static final String KEY_COPYBLOB_MAX_IO_RETRIES =
+    "fs.azure.io.copyblob.retry.max.retries";
 
   private static final String KEY_SELF_THROTTLE_ENABLE = "fs.azure.selfthrottling.enable";
   private static final String KEY_SELF_THROTTLE_READ_FACTOR = "fs.azure.selfthrottling.read.factor";
@@ -188,7 +199,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * The set of directories where we should store files as page blobs.
    */
   private Set<String> pageBlobDirs;
-  
+
   /**
    * Configuration key to indicate the set of directories in WASB where
    * we should do atomic folder rename synchronized with createNonRecursive.
@@ -232,11 +243,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private static final int DEFAULT_MAX_BACKOFF_INTERVAL = 30 * 1000; // 30s
   private static final int DEFAULT_BACKOFF_INTERVAL = 1 * 1000; // 1s
   private static final int DEFAULT_MAX_RETRY_ATTEMPTS = 15;
-  
+
   private static final int DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL = 3  * 1000;
   private static final int DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL = 90 * 1000;
   private static final int DEFAULT_COPYBLOB_BACKOFF_INTERVAL = 30 * 1000;
-  private static final int DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS = 15;  
+  private static final int DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS = 15;
 
   // Self-throttling defaults. Allowed range = (0,1.0]
   // Value of 1.0 means no self-throttling.
@@ -306,6 +317,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private boolean useSecureMode = false;
   private boolean useLocalSasKeyMode = false;
 
+  // User-Agent
+  private String userAgentId;
+
   private String delegationToken;
 
   /** The error message template when container is not accessible. */
@@ -319,7 +333,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * A test hook interface that can modify the operation context we use for
    * Azure Storage operations, e.g. to inject errors.
    */
-  @VisibleForTesting 
+  @VisibleForTesting
   interface TestHookOperationContext {
     OperationContext modifyOperationContext(OperationContext original);
   }
@@ -336,11 +350,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Add a test hook to modify the operation context we use for Azure Storage
    * operations.
-   * 
+   *
    * @param testHook
    *          The test hook, or null to unset previous hooks.
    */
-  @VisibleForTesting 
+  @VisibleForTesting
   void addTestHookToOperationContext(TestHookOperationContext testHook) {
     this.testHookOperationContext = testHook;
   }
@@ -358,7 +372,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Creates a JSON serializer that can serialize a PermissionStatus object into
    * the JSON string we want in the blob metadata.
-   * 
+   *
    * @return The JSON serializer.
    */
   private static JSON createPermissionJsonSerializer() {
@@ -425,7 +439,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   /**
    * Check if concurrent reads and writes on the same blob are allowed.
-   * 
+   *
    * @return true if concurrent reads and OOB writes has been configured, false
    *         otherwise.
    */
@@ -437,11 +451,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * Method for the URI and configuration object necessary to create a storage
    * session with an Azure session. It parses the scheme to ensure it matches
    * the storage protocol supported by this file system.
-   * 
+   *
    * @param uri - URI for target storage blob.
    * @param conf - reference to configuration object.
    * @param instrumentation - the metrics source that will keep track of operations here.
-   * 
+   *
    * @throws IllegalArgumentException if URI or job object is null, or invalid scheme.
    */
   @Override
@@ -504,6 +518,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     pageBlobDirs = getDirectorySet(KEY_PAGE_BLOB_DIRECTORIES);
     LOG.debug("Page blob directories:  {}", setToString(pageBlobDirs));
 
+    // User-agent
+    userAgentId = conf.get(USER_AGENT_ID_KEY, USER_AGENT_ID_DEFAULT);
+
     // Extract directories that should have atomic rename applied.
     atomicRenameDirs = getDirectorySet(KEY_ATOMIC_RENAME_DIRECTORIES);
     String hbaseRoot;
@@ -539,7 +556,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   /**
    * Method to extract the account name from an Azure URI.
-   * 
+   *
    * @param uri
    *          -- WASB blob URI
    * @returns accountName -- the account name for the URI.
@@ -590,7 +607,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   /**
    * Method to extract the container name from an Azure URI.
-   * 
+   *
    * @param uri
    *          -- WASB blob URI
    * @returns containerName -- the container name for the URI. May be null.
@@ -641,7 +658,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Get the appropriate return the appropriate scheme for communicating with
    * Azure depending on whether wasb or wasbs is specified in the target URI.
-   * 
+   *
    * return scheme - HTTPS or HTTP as appropriate.
    */
   private String getHTTPScheme() {
@@ -663,7 +680,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Set the configuration parameters for this client storage session with
    * Azure.
-   * 
+   *
    * @throws AzureException
    */
   private void configureAzureStorageSession() throws AzureException {
@@ -763,10 +780,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   /**
    * Connect to Azure storage using anonymous credentials.
-   * 
+   *
    * @param uri
    *          - URI to target blob (R/O access to public blob)
-   * 
+   *
    * @throws StorageException
    *           raised on errors communicating with Azure storage.
    * @throws IOException
@@ -893,7 +910,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
         STORAGE_EMULATOR_ACCOUNT_NAME_PROPERTY_NAME,
         DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME));
   }
-  
+
   @VisibleForTesting
   public static String getAccountKeyFromConfiguration(String accountName,
       Configuration conf) throws KeyProviderException {
@@ -930,7 +947,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * Establish a session with Azure blob storage based on the target URI. The
    * method determines whether or not the URI target contains an explicit
    * account or an implicit default cluster-wide account.
-   * 
+   *
    * @throws AzureException
    * @throws IOException
    */
@@ -983,7 +1000,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       instrumentation.setAccountName(accountName);
       String containerName = getContainerFromAuthority(sessionUri);
       instrumentation.setContainerName(containerName);
-      
+
       // Check whether this is a storage emulator account.
       if (isStorageEmulatorAccount(accountName)) {
         // It is an emulator account, connect to it with no credentials.
@@ -1086,7 +1103,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    */
   private String verifyAndConvertToStandardFormat(String rawDir) throws URISyntaxException {
     URI asUri = new URI(rawDir);
-    if (asUri.getAuthority() == null 
+    if (asUri.getAuthority() == null
         || asUri.getAuthority().toLowerCase(Locale.ENGLISH).equalsIgnoreCase(
       sessionUri.getAuthority().toLowerCase(Locale.ENGLISH))) {
       // Applies to me.
@@ -1167,8 +1184,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     return false;
   }
 
-  
-  
+
+
   /**
    * This should be called from any method that does any modifications to the
    * underlying container: it makes sure to put the WASB current version in the
@@ -1364,11 +1381,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
        *      could mean either:
        *        (1) container=mycontainer; blob=myblob.txt
        *        (2) container=$root; blob=mycontainer/myblob.txt
-       * 
+       *
        * To avoid this type of ambiguity the Azure blob storage prevents
        * arbitrary path under $root. For a simple and more consistent user
        * experience it was decided to eliminate the opportunity for creating
-       * such paths by making the $root container read-only under WASB. 
+       * such paths by making the $root container read-only under WASB.
        */
 
       // Check that no attempt is made to write to blobs on default
@@ -1445,7 +1462,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   /**
    * Default permission to use when no permission metadata is found.
-   * 
+   *
    * @return The default permission to use.
    */
   private static PermissionStatus defaultPermissionNoBlobMetadata() {
@@ -1688,7 +1705,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   /**
    * Private method to check for authenticated access.
-   * 
+   *
    * @ returns boolean -- true if access is credentialed and authenticated and
    * false otherwise.
    */
@@ -1708,7 +1725,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * original file system object was constructed with a short- or long-form URI.
    * If the root directory is non-null the URI in the file constructor was in
    * the long form.
-   * 
+   *
    * @param includeMetadata
    *          if set, the listed items will have their metadata populated
    *          already.
@@ -1717,7 +1734,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    *
    * @returns blobItems : iterable collection of blob items.
    * @throws URISyntaxException
-   * 
+   *
    */
   private Iterable<ListBlobItem> listRootBlobs(boolean includeMetadata,
       boolean useFlatBlobListing) throws StorageException, URISyntaxException {
@@ -1736,7 +1753,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * the directory depending on whether the original file system object was
    * constructed with a short- or long-form URI. If the root directory is
    * non-null the URI in the file constructor was in the long form.
-   * 
+   *
    * @param aPrefix
    *          : string name representing the prefix of containing blobs.
    * @param includeMetadata
@@ -1744,10 +1761,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    *          already.
    * @param useFlatBlobListing
    *          if set the list is flat, otherwise it is hierarchical.
-   * 
+   *
    * @returns blobItems : iterable collection of blob items.
    * @throws URISyntaxException
-   * 
+   *
    */
   private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean includeMetadata,
       boolean useFlatBlobListing) throws StorageException, URISyntaxException {
@@ -1769,7 +1786,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * constructed with a short- or long-form URI. It also uses the specified flat
    * or hierarchical option, listing details options, request options, and
    * operation context.
-   * 
+   *
    * @param aPrefix
    *          string name representing the prefix of containing blobs.
    * @param useFlatBlobListing
@@ -1784,7 +1801,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    *          - context of the current operation
    * @returns blobItems : iterable collection of blob items.
    * @throws URISyntaxException
-   * 
+   *
    */
   private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean useFlatBlobListing,
       EnumSet<BlobListingDetails> listingDetails, BlobRequestOptions options,
@@ -1804,13 +1821,13 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * get the block blob reference depending on whether the original file system
    * object was constructed with a short- or long-form URI. If the root
    * directory is non-null the URI in the file constructor was in the long form.
-   * 
+   *
    * @param aKey
    *          : a key used to query Azure for the block blob.
    * @returns blob : a reference to the Azure block blob corresponding to the
    *          key.
    * @throws URISyntaxException
-   * 
+   *
    */
   private CloudBlobWrapper getBlobReference(String aKey)
       throws StorageException, URISyntaxException {
@@ -1831,10 +1848,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * This private method normalizes the key by stripping the container name from
    * the path and returns a path relative to the root directory of the
    * container.
-   * 
+   *
    * @param keyUri
    *          - adjust this key to a path relative to the root directory
-   * 
+   *
    * @returns normKey
    */
   private String normalizeKey(URI keyUri) {
@@ -1853,11 +1870,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * This private method normalizes the key by stripping the container name from
    * the path and returns a path relative to the root directory of the
    * container.
-   * 
+   *
    * @param blob
    *          - adjust the key to this blob to a path relative to the root
    *          directory
-   * 
+   *
    * @returns normKey
    */
   private String normalizeKey(CloudBlobWrapper blob) {
@@ -1868,11 +1885,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * This private method normalizes the key by stripping the container name from
    * the path and returns a path relative to the root directory of the
    * container.
-   * 
+   *
    * @param directory
    *          - adjust the key to this directory to a path relative to the root
    *          directory
-   * 
+   *
    * @returns normKey
    */
   private String normalizeKey(CloudBlobDirectoryWrapper directory) {
@@ -1889,7 +1906,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * operation that has listeners hooked to it that will update the metrics for
    * this file system. This method does not bind to receive send request
    * callbacks by default.
-   * 
+   *
    * @return The OperationContext object to use.
    */
   private OperationContext getInstrumentedContext() {
@@ -1900,16 +1917,27 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Creates a new OperationContext for the Azure Storage operation that has
    * listeners hooked to it that will update the metrics for this file system.
-   * 
+   *
    * @param bindConcurrentOOBIo
    *          - bind to intercept send request call backs to handle OOB I/O.
-   * 
+   *
    * @return The OperationContext object to use.
    */
   private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {
 
     OperationContext operationContext = new OperationContext();
 
+    // Set User-Agent
+    operationContext.getSendingRequestEventHandler().addListener(new StorageEvent<SendingRequestEvent>() {
+      @Override
+      public void eventOccurred(SendingRequestEvent eventArg) {
+        HttpURLConnection connection = (HttpURLConnection) eventArg.getConnectionObject();
+        String userAgentInfo = String.format(Utility.LOCALE_US, "WASB/%s (%s) %s",
+                VersionInfo.getVersion(), userAgentId, BaseRequest.getUserAgent());
+        connection.setRequestProperty(Constants.HeaderConstants.USER_AGENT, userAgentInfo);
+      }
+    });
+
     if (selfThrottlingEnabled) {
       SelfThrottlingIntercept.hook(operationContext, selfThrottlingReadFactor,
           selfThrottlingWriteFactor);
@@ -2096,7 +2124,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Searches the given list of {@link FileMetadata} objects for a directory
    * with the given key.
-   * 
+   *
    * @param list
    *          The list to search.
    * @param key
@@ -2229,7 +2257,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * Build up a metadata list of blobs in an Azure blob directory. This method
    * uses an in-order, depth-first traversal of blob directory structures to maintain
    * the sorted order of the blob names.
-   * 
+   *
    * @param aCloudBlobDirectory Azure blob directory
    * @param aFileMetadataList a list of file metadata objects for each
    *                          non-directory blob.
@@ -2564,7 +2592,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       //
       // Copy blob operation in Azure storage is very costly. It will be highly
       // likely throttled during Azure storage gc. Short term fix will be using
-      // a more intensive exponential retry policy when the cluster is getting 
+      // a more intensive exponential retry policy when the cluster is getting
       // throttled.
       try {
         dstBlob.startCopyFromBlob(srcBlob, null, getInstrumentedContext());
@@ -2585,10 +2613,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
           int copyBlobMaxRetries = sessionConfiguration.getInt(
             KEY_COPYBLOB_MAX_IO_RETRIES,
             DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS);
-	        
+
           BlobRequestOptions options = new BlobRequestOptions();
           options.setRetryPolicyFactory(new RetryExponentialRetry(
-            copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff, 
+            copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff,
             copyBlobMaxRetries));
           dstBlob.startCopyFromBlob(srcBlob, options, getInstrumentedContext());
         } else {
@@ -2794,7 +2822,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       bandwidthGaugeUpdater = null;
     }
   }
-  
+
   // Finalizer to ensure complete shutdown
   @Override
   protected void finalize() throws Throwable {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 7415e29..79cb0ea 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -192,6 +192,19 @@ The configuration option `fs.azure.page.blob.extension.size` is the page blob
 extension size.  This defines the amount to extend a page blob if it starts to
 get full.  It must be 128MB or greater, specified as an integer number of bytes.
 
+### Custom User-Agent
+WASB passes a User-Agent header to the Azure back-end. The default value
+contains the WASB version, the Java runtime version, the Azure Client library
+version, and the value of `fs.azure.user.agent.prefix`. A customized User-Agent
+header enables better troubleshooting and analysis by the Azure service.
+
+```xml
+<property>
+    <name>fs.azure.user.agent.prefix</name>
+    <value>Identifier</value>
+</property>
+```
+
 ### Atomic Folder Rename
 
 Azure storage stores files as a flat key/value store without formal support

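For concreteness, here is a minimal, hedged sketch of how the patch assembles the
User-Agent value in getInstrumentedContext(). The version strings below are invented
stand-ins for VersionInfo.getVersion() and BaseRequest.getUserAgent(), and the sketch
omits the locale argument the real call passes to String.format:

```java
// Hedged illustration only, not the WASB implementation itself.
public class UserAgentSketch {
  public static void main(String[] args) {
    String wasbVersion = "2.9.0";            // stand-in for VersionInfo.getVersion()
    String userAgentId = "Identifier";       // value of fs.azure.user.agent.prefix
    String sdkAgent = "Azure-Storage/5.4.0"; // stand-in for BaseRequest.getUserAgent()
    String userAgentInfo = String.format("WASB/%s (%s) %s",
        wasbVersion, userAgentId, sdkAgent);
    // Prints: WASB/2.9.0 (Identifier) Azure-Storage/5.4.0
    System.out.println(userAgentInfo);
  }
}
```
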
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
index 194a831..672ed9c 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
@@ -566,4 +566,52 @@ public class TestWasbUriAndConfiguration {
         CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, null);
     assertEquals(newPath, effectivePath);
   }
+
+  @Test
+  public void testUserAgentConfig() throws Exception {
+    // Set the user agent
+    try {
+      testAccount = AzureBlobStorageTestAccount.createMock();
+      Configuration conf = testAccount.getFileSystem().getConf();
+      String authority = testAccount.getFileSystem().getUri().getAuthority();
+      URI defaultUri = new URI("wasbs", authority, null, null, null);
+      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+      conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+
+      conf.set(AzureNativeFileSystemStore.USER_AGENT_ID_KEY, "TestClient");
+
+      FileSystem fs = FileSystem.get(conf);
+      AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
+
+      assertTrue(afs instanceof Wasbs);
+      assertEquals(-1, afs.getUri().getPort());
+      assertEquals("wasbs", afs.getUri().getScheme());
+
+    } finally {
+      testAccount.cleanup();
+      FileSystem.closeAll();
+    }
+
+    // Unset the user agent
+    try {
+      testAccount = AzureBlobStorageTestAccount.createMock();
+      Configuration conf = testAccount.getFileSystem().getConf();
+      String authority = testAccount.getFileSystem().getUri().getAuthority();
+      URI defaultUri = new URI("wasbs", authority, null, null, null);
+      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+      conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+
+      conf.unset(AzureNativeFileSystemStore.USER_AGENT_ID_KEY);
+
+      FileSystem fs = FileSystem.get(conf);
+      AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
+      assertTrue(afs instanceof Wasbs);
+      assertEquals(-1, afs.getUri().getPort());
+      assertEquals("wasbs", afs.getUri().getScheme());
+
+    } finally {
+      testAccount.cleanup();
+      FileSystem.closeAll();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2921e51/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
index acd9459..8c88743 100644
--- a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
+++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
@@ -34,6 +34,11 @@
     <value>true</value>
   </property>
 
+  <property>
+    <name>fs.azure.user.agent.prefix</name>
+    <value>MSFT</value>
+  </property>
+
   <!-- Save the above configuration properties in a separate file named -->
   <!-- azure-auth-keys.xml in the same directory as this file. -->
   <!-- DO NOT ADD azure-auth-keys.xml TO REVISION CONTROL.  The keys to your -->



[20/50] [abbrv] hadoop git commit: HDFS-12190. Enable 'hdfs dfs -stat' to display access time. Contributed by Yongjun Zhang.

Posted by xg...@apache.org.
HDFS-12190. Enable 'hdfs dfs -stat' to display access time. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6330f22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6330f22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6330f22

Branch: refs/heads/YARN-5734
Commit: c6330f22a5e5c2370bab885f9bea4bf8f5e9cf44
Parents: e3c7300
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Thu Jul 27 16:48:24 2017 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Thu Jul 27 16:48:24 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/shell/Stat.java    | 19 ++++++++++++++-----
 .../src/site/markdown/FileSystemShell.md         |  4 ++--
 .../src/test/resources/testConf.xml              | 10 +++++++---
 .../org/apache/hadoop/hdfs/TestDFSShell.java     | 12 ++++++++++--
 4 files changed, 33 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6330f22/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index cf8270e..8c624cc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -40,8 +40,10 @@ import org.apache.hadoop.fs.FileStatus;
  *   %o: Block size<br>
  *   %r: replication<br>
  *   %u: User name of owner<br>
- *   %y: UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
- *   %Y: Milliseconds since January 1, 1970 UTC<br>
+ *   %x: atime UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
+ *   %X: atime Milliseconds since January 1, 1970 UTC<br>
+ *   %y: mtime UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br>
+ *   %Y: mtime Milliseconds since January 1, 1970 UTC<br>
  * If the format is not specified, %y is used by default.
  */
 @InterfaceAudience.Private
@@ -62,9 +64,10 @@ class Stat extends FsCommand {
     "octal (%a) and symbolic (%A), filesize in" + NEWLINE +
     "bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
     "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
-    "of owner (%u), modification date (%y, %Y)." + NEWLINE +
-    "%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
-    "%Y shows milliseconds since January 1, 1970 UTC." + NEWLINE +
+    "of owner (%u), access date (%x, %X)." + NEWLINE +
+    "modification date (%y, %Y)." + NEWLINE +
+    "%x and %y show UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +
+    "%X and %Y show milliseconds since January 1, 1970 UTC." + NEWLINE +
     "If the format is not specified, %y is used by default." + NEWLINE;
 
   protected final SimpleDateFormat timeFmt;
@@ -127,6 +130,12 @@ class Stat extends FsCommand {
           case 'u':
             buf.append(stat.getOwner());
             break;
+          case 'x':
+            buf.append(timeFmt.format(new Date(stat.getAccessTime())));
+            break;
+          case 'X':
+            buf.append(stat.getAccessTime());
+            break;
           case 'y':
             buf.append(timeFmt.format(new Date(stat.getModificationTime())));
             break;

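As a quick, hedged illustration of the four time tokens (a standalone sketch, not
Hadoop's Stat implementation; the epoch values are made up):

```java
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

// %x/%X report access time, %y/%Y report modification time;
// lower-case tokens format the date, upper-case tokens print raw millis.
public class StatTokensSketch {
  public static void main(String[] args) {
    long atime = 1501199304000L;  // hypothetical access time (ms since epoch)
    long mtime = 1501199904000L;  // hypothetical modification time
    SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
    System.out.println("%x -> " + fmt.format(new Date(atime)));
    System.out.println("%X -> " + atime);
    System.out.println("%y -> " + fmt.format(new Date(mtime)));
    System.out.println("%Y -> " + mtime);
  }
}
```
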
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6330f22/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 0a594ab..71eec75 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -676,11 +676,11 @@ stat
 
 Usage: `hadoop fs -stat [format] <path> ...`
 
-Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), and modification date (%y, %Y). %y shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
+Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner (%u), access date (%x, %X), and modification date (%y, %Y). %x and %y show UTC date as "yyyy-MM-dd HH:mm:ss", and %X and %Y show milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
 
 Example:
 
-* `hadoop fs -stat "%F %a %u:%g %b %y %n" /file`
+* `hadoop fs -stat "type:%F perm:%a %u:%g size:%b mtime:%y atime:%x name:%n" /file`
 
 Exit Code: Returns 0 on success and -1 on error.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6330f22/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 64677f8..6a3d53a 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -919,15 +919,19 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*of owner \(%u\), modification date \(%y, %Y\).( )*</expected-output>
+          <expected-output>^( |\t)*of owner \(%u\), access date \(%x, %X\).( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*%y shows UTC date as "yyyy-MM-dd HH:mm:ss" and( )*</expected-output>
+          <expected-output>^( |\t)*modification date \(%y, %Y\).( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*%Y shows milliseconds since January 1, 1970 UTC.( )*</expected-output>
+          <expected-output>^( |\t)*%x and %y show UTC date as "yyyy-MM-dd HH:mm:ss" and( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*%X and %Y show milliseconds since January 1, 1970 UTC.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6330f22/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index c82c045..27d41b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -36,12 +36,12 @@ import java.util.zip.GZIPOutputStream;
 
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
+
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.log4j.Level;
 import org.junit.Test;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -65,6 +65,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.rules.Timeout;
 import org.junit.AfterClass;
@@ -115,6 +116,7 @@ public class TestDFSShell {
         GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath());
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1000);
 
     miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     miniCluster.waitActive();
@@ -2002,8 +2004,12 @@ public class TestDFSShell {
     DFSTestUtil.createFile(dfs, testFile2, 2 * BLOCK_SIZE, (short) 3, 0);
     final FileStatus status1 = dfs.getFileStatus(testDir1);
     final String mtime1 = fmt.format(new Date(status1.getModificationTime()));
+    final String atime1 = fmt.format(new Date(status1.getAccessTime()));
+    long now = Time.now();
+    dfs.setTimes(testFile2, now + 3000, now + 6000);
     final FileStatus status2 = dfs.getFileStatus(testFile2);
     final String mtime2 = fmt.format(new Date(status2.getModificationTime()));
+    final String atime2 = fmt.format(new Date(status2.getAccessTime()));
 
     final ByteArrayOutputStream out = new ByteArrayOutputStream();
     System.setOut(new PrintStream(out));
@@ -2036,17 +2042,19 @@ public class TestDFSShell {
         out.toString().contains(String.valueOf(octal)));
 
     out.reset();
-    doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1, testFile2);
+    doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2);
 
     n = status2.getPermission().toShort();
     octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
     assertTrue(out.toString(), out.toString().contains(mtime1));
+    assertTrue(out.toString(), out.toString().contains(atime1));
     assertTrue(out.toString(), out.toString().contains("regular file"));
     assertTrue(out.toString(),
         out.toString().contains(status2.getPermission().toString()));
     assertTrue(out.toString(),
         out.toString().contains(String.valueOf(octal)));
     assertTrue(out.toString(), out.toString().contains(mtime2));
+    assertTrue(out.toString(), out.toString().contains(atime2));
   }
 
   private static void doFsStat(Configuration conf, String format, Path... files)



[11/50] [abbrv] hadoop git commit: YARN-6804. Allow custom hostname for docker containers in native services. Contributed by Billie Rinaldi

Posted by xg...@apache.org.
YARN-6804. Allow custom hostname for docker containers in native services. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac9489f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac9489f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac9489f7

Branch: refs/heads/YARN-5734
Commit: ac9489f7fc2dd351fbe5be4b7a3add4782da81c3
Parents: a68b5b3
Author: Jian He <ji...@apache.org>
Authored: Mon Jul 24 21:08:10 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 09:57:14 2017 -0700

----------------------------------------------------------------------
 .../hadoop-client-minicluster/pom.xml           |  4 ++
 .../client/binding/RegistryPathUtils.java       |  2 +-
 .../hadoop/registry/client/types/Endpoint.java  |  4 +-
 .../registry/client/types/ServiceRecord.java    |  4 +-
 .../hadoop-yarn-server-nodemanager/pom.xml      |  4 ++
 .../runtime/DockerLinuxContainerRuntime.java    | 67 +++++++++++++++-----
 .../linux/runtime/docker/DockerRunCommand.java  |  6 ++
 .../impl/container-executor.c                   |  4 ++
 .../test/test-container-executor.c              | 16 ++---
 .../runtime/TestDockerContainerRuntime.java     | 58 +++++++++++++----
 10 files changed, 125 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 4512906..93811ad 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -395,6 +395,10 @@
         </exclusion>
         <exclusion>
           <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-registry</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-yarn-server-common</artifactId>
         </exclusion>
         <exclusion>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
index 5d8ea3f..5fa45f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java
@@ -213,6 +213,6 @@ public class RegistryPathUtils {
    * @return a string suitable for use in registry paths.
    */
   public static String encodeYarnID(String yarnId) {
-    return yarnId.replace("_", "-");
+    return yarnId.replace("container", "ctr").replace("_", "-");
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java
index 395f836..392884f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.registry.client.types;
 
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+import com.fasterxml.jackson.annotation.JsonInclude;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -46,7 +46,7 @@ import java.util.Map;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 @JsonIgnoreProperties(ignoreUnknown = true)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+@JsonInclude(JsonInclude.Include.NON_NULL)
 public final class Endpoint implements Cloneable {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java
index 674d6d3..d40866a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.registry.client.types;
 
 import com.fasterxml.jackson.annotation.JsonAnyGetter;
 import com.fasterxml.jackson.annotation.JsonAnySetter;
-import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+import com.fasterxml.jackson.annotation.JsonInclude;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,7 +37,7 @@ import java.util.Map;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+@JsonInclude(JsonInclude.Include.NON_NULL)
 public class ServiceRecord implements Cloneable {
 
   /**

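The two annotation swaps above replace the older @JsonSerialize(include = ...)
inclusion setting with Jackson 2's @JsonInclude. A minimal, hedged sketch of the
effect (the class and field names here are invented for illustration):

```java
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;

// Hypothetical record type; with NON_NULL inclusion, null fields are
// omitted from the serialized JSON entirely.
@JsonInclude(JsonInclude.Include.NON_NULL)
class DemoRecord {
  public String api = "classpath:org.example.demo";
  public String description;  // left null, so it is not serialized
}

public class JsonIncludeSketch {
  public static void main(String[] args) throws Exception {
    // Prints: {"api":"classpath:org.example.demo"}
    System.out.println(new ObjectMapper().writeValueAsString(new DemoRecord()));
  }
}
```
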
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index a0f4ef7..094519a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -52,6 +52,10 @@
       <artifactId>hadoop-yarn-api</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-registry</artifactId>
+    </dependency>
+    <dependency>
       <groupId>javax.xml.bind</groupId>
       <artifactId>jaxb-api</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 8db03bc..e058d6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.StringUtils;
@@ -101,6 +102,11 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *     property.
  *   </li>
  *   <li>
+ *     {@code YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME} sets the
+ *     hostname to be used by the Docker container. If not specified, a
+ *     hostname will be derived from the container ID.
+ *   </li>
+ *   <li>
  *     {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER}
  *     controls whether the Docker container is a privileged container. In order
  *     to use privileged containers, the
@@ -134,6 +140,10 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       "^(([a-zA-Z0-9.-]+)(:\\d+)?/)?([a-z0-9_./-]+)(:[\\w.-]+)?$";
   private static final Pattern dockerImagePattern =
       Pattern.compile(DOCKER_IMAGE_PATTERN);
+  public static final String HOSTNAME_PATTERN =
+      "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$";
+  private static final Pattern hostnamePattern = Pattern.compile(
+      HOSTNAME_PATTERN);
 
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
@@ -147,6 +157,10 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_NETWORK =
       "YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK";
+  @InterfaceAudience.Private
+  public static final String ENV_DOCKER_CONTAINER_HOSTNAME =
+      "YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME";
+  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER =
       "YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER";
   @InterfaceAudience.Private
@@ -211,9 +225,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     this.privilegedOperationExecutor = privilegedOperationExecutor;
 
     if (cGroupsHandler == null) {
-      if (LOG.isInfoEnabled()) {
-        LOG.info("cGroupsHandler is null - cgroups not in use.");
-      }
+      LOG.info("cGroupsHandler is null - cgroups not in use.");
     } else {
       this.cGroupsHandler = cGroupsHandler;
     }
@@ -267,6 +279,29 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     throw new ContainerExecutionException(msg);
   }
 
+  public static void validateHostname(String hostname) throws
+      ContainerExecutionException {
+    if (hostname != null && !hostname.isEmpty()) {
+      if (!hostnamePattern.matcher(hostname).matches()) {
+        throw new ContainerExecutionException("Hostname '" + hostname
+            + "' doesn't match docker hostname pattern");
+      }
+    }
+  }
+
+  /** Set a DNS-friendly hostname. */
+  private void setHostname(DockerRunCommand runCommand, String
+      containerIdStr, String name)
+      throws ContainerExecutionException {
+    if (name == null || name.isEmpty()) {
+      name = RegistryPathUtils.encodeYarnID(containerIdStr);
+      validateHostname(name);
+    }
+
+    LOG.info("setting hostname in container to: " + name);
+    runCommand.setHostname(name);
+  }
+
   /**
    * If CGROUPS is enabled and not set to none, then set the CGROUP parent for
    * the command instance.
@@ -343,10 +378,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       return false;
     }
 
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Privileged container requested for : " + container
-          .getContainerId().toString());
-    }
+    LOG.info("Privileged container requested for : " + container
+        .getContainerId().toString());
 
     //Ok, so we have been asked to run a privileged container. Security
     // checks need to be run. Each violation is an error.
@@ -375,10 +408,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       throw new ContainerExecutionException(message);
     }
 
-    if (LOG.isInfoEnabled()) {
-      LOG.info("All checks pass. Launching privileged container for : "
-          + container.getContainerId().toString());
-    }
+    LOG.info("All checks pass. Launching privileged container for : "
+        + container.getContainerId().toString());
 
     return true;
   }
@@ -413,6 +444,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         .getEnvironment();
     String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE);
     String network = environment.get(ENV_DOCKER_CONTAINER_NETWORK);
+    String hostname = environment.get(ENV_DOCKER_CONTAINER_HOSTNAME);
 
     if(network == null || network.isEmpty()) {
       network = defaultNetwork;
@@ -420,6 +452,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     validateContainerNetworkType(network);
 
+    validateHostname(hostname);
+
     validateImageName(imageName);
 
     String containerIdStr = container.getContainerId().toString();
@@ -450,12 +484,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         runAsUser, imageName)
         .detachOnRun()
         .setContainerWorkDir(containerWorkDir.toString())
-        .setNetworkType(network)
-        .setCapabilities(capabilities)
+        .setNetworkType(network);
+    setHostname(runCommand, containerIdStr, hostname);
+    runCommand.setCapabilities(capabilities)
         .addMountLocation(CGROUPS_ROOT_DIRECTORY,
             CGROUPS_ROOT_DIRECTORY + ":ro", false);
-    List<String> allDirs = new ArrayList<>(containerLocalDirs);
 
+    List<String> allDirs = new ArrayList<>(containerLocalDirs);
     allDirs.addAll(filecacheDirs);
     allDirs.add(containerWorkDir.toString());
     allDirs.addAll(containerLogDirs);
@@ -493,9 +528,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE);
 
     if (disableOverride != null && disableOverride.equals("true")) {
-      if (LOG.isInfoEnabled()) {
-        LOG.info("command override disabled");
-      }
+      LOG.info("command override disabled");
     } else {
       List<String> overrideCommands = new ArrayList<>();
       Path launchDst =

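Putting the runtime pieces together, a small, hedged sketch of how a default
hostname is derived from a container ID and checked against the new pattern
(the container ID is hypothetical; encodeYarnID mirrors the RegistryPathUtils
change earlier in this commit):

```java
import java.util.regex.Pattern;

// Standalone sketch, not the NodeManager code itself.
public class HostnameSketch {
  private static final Pattern HOSTNAME_PATTERN =
      Pattern.compile("^[a-zA-Z0-9][a-zA-Z0-9_.-]+$");

  // Mirrors RegistryPathUtils.encodeYarnID after this change.
  static String encodeYarnID(String yarnId) {
    return yarnId.replace("container", "ctr").replace("_", "-");
  }

  public static void main(String[] args) {
    String containerId = "container_1500000000000_0001_01_000002"; // hypothetical
    String hostname = encodeYarnID(containerId);
    // Prints: ctr-1500000000000-0001-01-000002 matches=true
    System.out.println(hostname + " matches="
        + HOSTNAME_PATTERN.matcher(hostname).matches());
  }
}
```
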
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index f79f4ed..b645754 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -91,6 +91,12 @@ public class DockerRunCommand extends DockerCommand {
 
     return this;
   }
+
+  public DockerRunCommand setHostname(String hostname) {
+    super.addCommandArguments("--hostname=" + hostname);
+    return this;
+  }
+
   public DockerRunCommand addDevice(String sourceDevice, String
       destinationDevice) {
     super.addCommandArguments("--device=" + sourceDevice + ":" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 5d138f3..5070d62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1215,6 +1215,7 @@ char* sanitize_docker_command(const char *line) {
     {"rm", no_argument, 0, 'r' },
     {"workdir", required_argument, 0, 'w' },
     {"net", required_argument, 0, 'e' },
+    {"hostname", required_argument, 0, 'h' },
     {"cgroup-parent", required_argument, 0, 'g' },
     {"privileged", no_argument, 0, 'p' },
     {"cap-add", required_argument, 0, 'a' },
@@ -1256,6 +1257,9 @@ char* sanitize_docker_command(const char *line) {
       case 'e':
         quote_and_append_arg(&output, &output_size, "--net=", optarg);
         break;
+      case 'h':
+        quote_and_append_arg(&output, &output_size, "--hostname=", optarg);
+        break;
       case 'v':
         quote_and_append_arg(&output, &output_size, "-v ", optarg);
         break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 83d11ec..b7d0e44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1088,17 +1088,17 @@ void test_trim_function() {
 void test_sanitize_docker_command() {
 
   char *input[] = {
-    "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
-    "run --name=$CID --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
-    "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
-    "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu' || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
+    "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
+    "run --name=$CID --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
+    "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
+    "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu' || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
     "run ''''''''"
   };
   char *expected_output[] = {
-      "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
-      "run --name='$CID' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
-      "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
-      "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu'\"'\"'' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
+      "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
+      "run --name='$CID' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
+      "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
+      "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu'\"'\"'' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
       "run ''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"'' ",
   };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9489f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index f611843..9894dcd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -69,6 +70,7 @@ public class TestDockerContainerRuntime {
   private PrivilegedOperationExecutor mockExecutor;
   private CGroupsHandler mockCGroupsHandler;
   private String containerId;
+  private String defaultHostname;
   private Container container;
   private ContainerId cId;
   private ContainerLaunchContext context;
@@ -108,6 +110,7 @@ public class TestDockerContainerRuntime {
         .mock(PrivilegedOperationExecutor.class);
     mockCGroupsHandler = Mockito.mock(CGroupsHandler.class);
     containerId = "container_id";
+    defaultHostname = RegistryPathUtils.encodeYarnID(containerId);
     container = mock(Container.class);
     cId = mock(ContainerId.class);
     context = mock(ContainerLaunchContext.class);
@@ -287,6 +290,7 @@ public class TestDockerContainerRuntime {
         .append("--user=%2$s -d ")
         .append("--workdir=%3$s ")
         .append("--net=host ")
+        .append("--hostname=" + defaultHostname + " ")
         .append(getExpectedTestCapabilitiesArgumentString())
         .append(getExpectedCGroupsMountString())
         .append("-v %4$s:%4$s ")
@@ -365,7 +369,7 @@ public class TestDockerContainerRuntime {
     String disallowedNetwork = "sdn" + Integer.toString(randEngine.nextInt());
 
     try {
-      env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
+      env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
           disallowedNetwork);
       runtime.launchContainer(builder.build());
       Assert.fail("Network was expected to be disallowed: " +
@@ -378,8 +382,11 @@ public class TestDockerContainerRuntime {
         .DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_NETWORKS.length;
     String allowedNetwork = YarnConfiguration
         .DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_NETWORKS[randEngine.nextInt(size)];
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
+    env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
         allowedNetwork);
+    String expectedHostname = "test.hostname";
+    env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_HOSTNAME,
+        expectedHostname);
 
     //this should cause no failures.
 
@@ -393,6 +400,7 @@ public class TestDockerContainerRuntime {
         new StringBuffer("run --name=%1$s ").append("--user=%2$s -d ")
             .append("--workdir=%3$s ")
             .append("--net=" + allowedNetwork + " ")
+            .append("--hostname=" + expectedHostname + " ")
             .append(getExpectedTestCapabilitiesArgumentString())
             .append(getExpectedCGroupsMountString())
             .append("-v %4$s:%4$s ").append("-v %5$s:%5$s ")
@@ -448,6 +456,7 @@ public class TestDockerContainerRuntime {
         new StringBuffer("run --name=%1$s ").append("--user=%2$s -d ")
             .append("--workdir=%3$s ")
             .append("--net=" + customNetwork1 + " ")
+            .append("--hostname=" + defaultHostname + " ")
             .append(getExpectedTestCapabilitiesArgumentString())
             .append(getExpectedCGroupsMountString())
             .append("-v %4$s:%4$s ").append("-v %5$s:%5$s ")
@@ -471,7 +480,7 @@ public class TestDockerContainerRuntime {
     //now set an explicit (non-default) allowedNetwork and ensure that it is
     // used.
 
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
+    env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
         customNetwork2);
     runtime.launchContainer(builder.build());
 
@@ -485,6 +494,7 @@ public class TestDockerContainerRuntime {
         new StringBuffer("run --name=%1$s ").append("--user=%2$s -d ")
             .append("--workdir=%3$s ")
             .append("--net=" + customNetwork2 + " ")
+            .append("--hostname=" + defaultHostname + " ")
             .append(getExpectedTestCapabilitiesArgumentString())
             .append(getExpectedCGroupsMountString())
             .append("-v %4$s:%4$s ").append("-v %5$s:%5$s ")
@@ -505,7 +515,7 @@ public class TestDockerContainerRuntime {
 
     //disallowed network should trigger a launch failure
 
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
+    env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
         customNetwork3);
     try {
       runtime.launchContainer(builder.build());
@@ -524,8 +534,8 @@ public class TestDockerContainerRuntime {
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf);
 
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
-        "invalid-value");
+    env.put(DockerLinuxContainerRuntime
+            .ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "invalid-value");
     runtime.launchContainer(builder.build());
 
     PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
@@ -552,8 +562,8 @@ public class TestDockerContainerRuntime {
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf);
 
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
-        "true");
+    env.put(DockerLinuxContainerRuntime
+            .ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
 
     try {
       runtime.launchContainer(builder.build());
@@ -575,8 +585,8 @@ public class TestDockerContainerRuntime {
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf);
 
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
-        "true");
+    env.put(DockerLinuxContainerRuntime
+            .ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
     //By default
     // yarn.nodemanager.runtime.linux.docker.privileged-containers.acl
     // is empty. So we expect this launch to fail.
@@ -605,8 +615,8 @@ public class TestDockerContainerRuntime {
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf);
 
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
-        "true");
+    env.put(DockerLinuxContainerRuntime
+            .ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
 
     try {
       runtime.launchContainer(builder.build());
@@ -632,8 +642,8 @@ public class TestDockerContainerRuntime {
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf);
 
-    env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
-        "true");
+    env.put(DockerLinuxContainerRuntime
+            .ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
 
     runtime.launchContainer(builder.build());
     PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
@@ -927,4 +937,24 @@ public class TestDockerContainerRuntime {
       }
     }
   }
+
+  @Test
+  public void testDockerHostnamePattern() throws Exception {
+    String[] validNames = {"ab", "a.b.c.d", "a1-b.cd.ef", "0AB.", "C_D-"};
+
+    String[] invalidNames = {"a", "a#.b.c", "-a.b.c", "a@b.c", "a/b/c"};
+
+    for (String name : validNames) {
+      DockerLinuxContainerRuntime.validateHostname(name);
+    }
+
+    for (String name : invalidNames) {
+      try {
+        DockerLinuxContainerRuntime.validateHostname(name);
+        Assert.fail(name + " is an invalid hostname and should fail the regex");
+      } catch (ContainerExecutionException ce) {
+        continue;
+      }
+    }
+  }
 }
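
For reference: the default --hostname added above is derived from the container ID via RegistryPathUtils.encodeYarnID(containerId), presumably to turn the YARN ID into a DNS-legal name. The diff never shows the hostname pattern itself, but the valid/invalid vectors in testDockerHostnamePattern pin it down fairly tightly. A self-contained sketch consistent with those vectors (the regex below is inferred, not copied; the production check is DockerLinuxContainerRuntime.validateHostname(), which throws ContainerExecutionException rather than the stand-in exception here):

  import java.util.regex.Pattern;

  public final class HostnamePatternSketch {
    // Inferred from the test vectors: first char alphanumeric, at least two
    // chars total, then any mix of alphanumerics, '_', '.', and '-'.
    private static final Pattern HOSTNAME =
        Pattern.compile("^[a-zA-Z0-9][a-zA-Z0-9_.-]+$");

    static void validateHostname(String hostname) {
      if (hostname != null && !HOSTNAME.matcher(hostname).matches()) {
        throw new IllegalArgumentException(
            "Hostname '" + hostname + "' fails the docker hostname pattern");
      }
    }

    public static void main(String[] args) {
      validateHostname("a1-b.cd.ef");      // accepted
      try {
        validateHostname("-a.b.c");        // rejected: leading '-'
      } catch (IllegalArgumentException expected) {
        System.out.println(expected.getMessage());
      }
    }
  }

This accepts all five valid names and rejects all five invalid ones ("a" falls out on the two-character minimum rather than on any character class).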




[27/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
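
Context for the rename below: Java 9 promotes a lone underscore to a reserved word, so hamlet's _() terminator methods and the Page.HTML<_> type parameter stop compiling. hamlet2 is a copy of the package with every such identifier doubled to __. A minimal standalone illustration (not hamlet itself):

  public class UnderscoreDemo {
    // int _ = 1;      // javac 9+: error, '_' is a keyword
    int __ = 1;        // still legal: two underscores

    // hamlet  (old):  p("#hello")._($("title"))._()._();
    // hamlet2 (new):  p("#hello").__($("title")).__().__();
  }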
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
index 6403e3b..c5117ed 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
@@ -39,16 +39,15 @@ import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TFOOT;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
-import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TFOOT;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
-import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 
 /**
@@ -110,7 +109,7 @@ public class HsTaskPage extends HsView {
       headRow.th("Elapsed Time").
               th(".note", "Note");
       
-       TBODY<TABLE<Hamlet>> tbody = headRow._()._().tbody();
+       TBODY<TABLE<Hamlet>> tbody = headRow.__().__().tbody();
        // Write all the data into a JavaScript array of arrays for JQuery
        // DataTables to display
        StringBuilder attemptsTableData = new StringBuilder("[\n");
@@ -182,55 +181,55 @@ public class HsTaskPage extends HsView {
        }
        attemptsTableData.append("]");
        html.script().$type("text/javascript").
-       _("var attemptsTableData=" + attemptsTableData)._();
+           __("var attemptsTableData=" + attemptsTableData).__();
 
-      TR<TFOOT<TABLE<Hamlet>>> footRow = tbody._().tfoot().tr();
+      TR<TFOOT<TABLE<Hamlet>>> footRow = tbody.__().tfoot().tr();
       footRow.
           th().input("search_init").$type(InputType.text).
-              $name("attempt_name").$value("Attempt")._()._().
+              $name("attempt_name").$value("Attempt").__().__().
           th().input("search_init").$type(InputType.text).
-              $name("attempt_state").$value("State")._()._().
+              $name("attempt_state").$value("State").__().__().
           th().input("search_init").$type(InputType.text).
-              $name("attempt_status").$value("Status")._()._().
+              $name("attempt_status").$value("Status").__().__().
           th().input("search_init").$type(InputType.text).
-              $name("attempt_node").$value("Node")._()._().
+              $name("attempt_node").$value("Node").__().__().
           th().input("search_init").$type(InputType.text).
-              $name("attempt_node").$value("Logs")._()._().
+              $name("attempt_node").$value("Logs").__().__().
           th().input("search_init").$type(InputType.text).
-              $name("attempt_start_time").$value("Start Time")._()._();
+              $name("attempt_start_time").$value("Start Time").__().__();
       
       if(type == TaskType.REDUCE) {
         footRow.
         th().input("search_init").$type(InputType.text).
-            $name("shuffle_time").$value("Shuffle Time")._()._();
+            $name("shuffle_time").$value("Shuffle Time").__().__();
         footRow.
         th().input("search_init").$type(InputType.text).
-            $name("merge_time").$value("Merge Time")._()._();
+            $name("merge_time").$value("Merge Time").__().__();
       }
       
       footRow.
         th().input("search_init").$type(InputType.text).
-            $name("attempt_finish").$value("Finish Time")._()._();
+            $name("attempt_finish").$value("Finish Time").__().__();
       
       if(type == TaskType.REDUCE) {
         footRow.
         th().input("search_init").$type(InputType.text).
-            $name("elapsed_shuffle_time").$value("Elapsed Shuffle Time")._()._();
+            $name("elapsed_shuffle_time").$value("Elapsed Shuffle Time").__().__();
         footRow.
         th().input("search_init").$type(InputType.text).
-            $name("elapsed_merge_time").$value("Elapsed Merge Time")._()._();
+            $name("elapsed_merge_time").$value("Elapsed Merge Time").__().__();
         footRow.
         th().input("search_init").$type(InputType.text).
-            $name("elapsed_reduce_time").$value("Elapsed Reduce Time")._()._();
+            $name("elapsed_reduce_time").$value("Elapsed Reduce Time").__().__();
       }
 
       footRow.
         th().input("search_init").$type(InputType.text).
-            $name("attempt_elapsed").$value("Elapsed Time")._()._().
+            $name("attempt_elapsed").$value("Elapsed Time").__().__().
         th().input("search_init").$type(InputType.text).
-            $name("note").$value("Note")._()._();
+            $name("note").$value("Note").__().__();
       
-      footRow._()._()._();
+      footRow.__().__().__();
     }
 
     protected String getAttemptId(TaskId taskId, TaskAttemptInfo ta) {
@@ -256,7 +255,7 @@ public class HsTaskPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     //override the nav config from commonPreHead
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
index 9511c06..702c13c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
@@ -28,14 +28,13 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TFOOT;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
-import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TFOOT;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -74,10 +73,10 @@ public class HsTasksBlock extends HtmlBlock {
     //Create the spanning row
     int attemptColSpan = type == TaskType.REDUCE ? 8 : 3;
     thead.tr().
-      th().$colspan(5).$class("ui-state-default")._("Task")._().
+      th().$colspan(5).$class("ui-state-default").__("Task").__().
       th().$colspan(attemptColSpan).$class("ui-state-default").
-        _("Successful Attempt")._().
-    _();
+        __("Successful Attempt").__().
+        __();
 
     TR<THEAD<TABLE<Hamlet>>> theadRow = thead.
           tr().
@@ -102,7 +101,7 @@ public class HsTasksBlock extends HtmlBlock {
     }
     theadRow.th("Elapsed Time"); //Attempt
 
-    TBODY<TABLE<Hamlet>> tbody = theadRow._()._().tbody();
+    TBODY<TABLE<Hamlet>> tbody = theadRow.__().__().tbody();
 
     // Write all the data into a JavaScript array of arrays for JQuery
     // DataTables to display
@@ -173,41 +172,41 @@ public class HsTasksBlock extends HtmlBlock {
     }
     tasksTableData.append("]");
     html.script().$type("text/javascript").
-    _("var tasksTableData=" + tasksTableData)._();
+        __("var tasksTableData=" + tasksTableData).__();
     
-    TR<TFOOT<TABLE<Hamlet>>> footRow = tbody._().tfoot().tr();
+    TR<TFOOT<TABLE<Hamlet>>> footRow = tbody.__().tfoot().tr();
     footRow.th().input("search_init").$type(InputType.text).$name("task")
-        .$value("ID")._()._().th().input("search_init").$type(InputType.text)
-        .$name("state").$value("State")._()._().th().input("search_init")
-        .$type(InputType.text).$name("start_time").$value("Start Time")._()._()
+        .$value("ID").__().__().th().input("search_init").$type(InputType.text)
+        .$name("state").$value("State").__().__().th().input("search_init")
+        .$type(InputType.text).$name("start_time").$value("Start Time").__().__()
         .th().input("search_init").$type(InputType.text).$name("finish_time")
-        .$value("Finish Time")._()._().th().input("search_init")
-        .$type(InputType.text).$name("elapsed_time").$value("Elapsed Time")._()
-        ._().th().input("search_init").$type(InputType.text)
-        .$name("attempt_start_time").$value("Start Time")._()._();
+        .$value("Finish Time").__().__().th().input("search_init")
+        .$type(InputType.text).$name("elapsed_time").$value("Elapsed Time").__()
+        .__().th().input("search_init").$type(InputType.text)
+        .$name("attempt_start_time").$value("Start Time").__().__();
 
     if(type == TaskType.REDUCE) {
       footRow.th().input("search_init").$type(InputType.text)
-          .$name("shuffle_time").$value("Shuffle Time")._()._();
+          .$name("shuffle_time").$value("Shuffle Time").__().__();
       footRow.th().input("search_init").$type(InputType.text)
-          .$name("merge_time").$value("Merge Time")._()._();
+          .$name("merge_time").$value("Merge Time").__().__();
     }
 
     footRow.th().input("search_init").$type(InputType.text)
-        .$name("attempt_finish").$value("Finish Time")._()._();
+        .$name("attempt_finish").$value("Finish Time").__().__();
 
     if(type == TaskType.REDUCE) {
       footRow.th().input("search_init").$type(InputType.text)
-          .$name("elapsed_shuffle_time").$value("Elapsed Shuffle Time")._()._();
+          .$name("elapsed_shuffle_time").$value("Elapsed Shuffle Time").__().__();
       footRow.th().input("search_init").$type(InputType.text)
-          .$name("elapsed_merge_time").$value("Elapsed Merge Time")._()._();
+          .$name("elapsed_merge_time").$value("Elapsed Merge Time").__().__();
       footRow.th().input("search_init").$type(InputType.text)
-          .$name("elapsed_reduce_time").$value("Elapsed Reduce Time")._()._();
+          .$name("elapsed_reduce_time").$value("Elapsed Reduce Time").__().__();
     }
 
     footRow.th().input("search_init").$type(InputType.text)
-        .$name("attempt_elapsed").$value("Elapsed Time")._()._();
+        .$name("attempt_elapsed").$value("Elapsed Time").__().__();
 
-    footRow._()._()._();
+    footRow.__().__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
index 3c3386e..d088591 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
@@ -41,7 +41,7 @@ public class HsTasksPage extends HsView {
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "tasks");
     set(DATATABLES_SELECTOR, ".dt-tasks" );

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
index 8e39087..510ece6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
@@ -39,7 +39,7 @@ public class HsView extends TwoColumnLayout {
    * (non-Javadoc)
    * @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "jobs");
     set(initID(DATATABLES, "jobs"), jobsTableInit());
@@ -51,7 +51,7 @@ public class HsView extends TwoColumnLayout {
    * The prehead that should be common to all subclasses.
    * @param html used to render.
    */
-  protected void commonPreHead(Page.HTML<_> html) {
+  protected void commonPreHead(Page.HTML<__> html) {
     set(ACCORDION_ID, "nav");
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 505e20f..f17cf8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -341,4 +341,26 @@
       </plugin>
     </plugins>
   </build>
+  <!-- TODO: Remove this profile when hamlet is removed. -->
+  <profiles>
+    <profile>
+      <id>java9</id>
+      <activation>
+        <jdk>9</jdk>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-compiler-plugin</artifactId>
+            <configuration>
+              <excludes>
+                <exclude>src/main/java/org/apache/hadoop/yarn/webapp/hamlet/**</exclude>
+                <exclude>src/main/test/org/apache/hadoop/yarn/webapp/hamlet/**</exclude>
+              </excludes>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
index b04bc5d..94063ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
@@ -75,12 +75,12 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
     return about;
   }
 
-  public ResponseInfo _(String key, Object value) {
+  public ResponseInfo __(String key, Object value) {
     items.add(Item.of(key, value, false));
     return this;
   }
 
-  public ResponseInfo _(String key, String url, Object anchor) {
+  public ResponseInfo __(String key, String url, Object anchor) {
     if (url == null) {
       items.add(Item.of(key, anchor, false));
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java
index 9b5cbd1..4376b6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java
@@ -41,11 +41,11 @@ public class HelloWorld {
   }
 
   public static class HelloView extends HtmlPage {
-    @Override protected void render(Page.HTML<_> html) {
+    @Override protected void render(Page.HTML<__> html) {
       html. // produces valid html 4.01 strict
         title($("title")).
         p("#hello-for-css").
-          _($("title"))._()._();
+          __($("title")).__().__();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java
index 3973f68..e13a883 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java
@@ -60,12 +60,12 @@ public class MyApp {
   public static class MyView extends HtmlPage {
     // You can inject the app in views if needed.
     @Override
-    public void render(Page.HTML<_> html) {
+    public void render(Page.HTML<__> html) {
       html.
         title("My App").
         p("#content_id_for_css_styling").
-          _("You can have", $("anything"))._()._();
-      // Note, there is no _(); (to parent element) method at root level.
+          __("You can have", $("anything")).__().__();
+      // Note, there is no __(); (to parent element) method at root level.
       // and IDE provides instant feedback on what level you're on in
       // the auto-completion drop-downs.
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
index 58d9066..7213865 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java
@@ -29,6 +29,10 @@ import java.util.EnumSet;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.webapp.SubView;
 
+/**
+ * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead.
+ */
+@Deprecated
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public class Hamlet extends HamletImpl implements HamletSpec._Html {
   public Hamlet(PrintWriter out, int nestLevel, boolean wasInline) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java
index 5acb3f3..8a2db8f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java
@@ -43,7 +43,9 @@ import org.slf4j.LoggerFactory;
 /**
  * Generates a specific hamlet implementation class from a spec class
  * using a generic hamlet implementation class.
+ * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead.
  */
+@Deprecated
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public class HamletGen {
   static final Logger LOG = LoggerFactory.getLogger(HamletGen.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
index d792d31..289ad70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
@@ -43,7 +43,9 @@ import org.apache.hadoop.yarn.webapp.WebAppException;
  * optimized to use a thread-local element pool.
  *
  * Prints HTML as it builds. So the order is important.
+ * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead.
  */
+@Deprecated
 @InterfaceAudience.Private
 public class HamletImpl extends HamletSpec {
   private static final String INDENT_CHARS = "  ";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java
index 081516c..e3bb6d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java
@@ -43,7 +43,9 @@ import org.apache.hadoop.yarn.webapp.SubView;
  * UPPERCASE, corresponding to an element definition in the DTD. $lowercase is
  * used as attribute builder methods to differentiate from element builder
  * methods.
+ * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead.
  */
+@Deprecated
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public class HamletSpec {
   // The enum values are lowercase for better compression,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java
index 3286ab5..4d0cf49 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java
@@ -15,6 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Deprecated.
+ * Use org.apache.hadoop.yarn.webapp.hamlet2 package instead.
+ */
+@Deprecated
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 package org.apache.hadoop.yarn.webapp.hamlet;
 import org.apache.hadoop.classification.InterfaceAudience;




[49/50] [abbrv] hadoop git commit: YARN-6575. Support global configuration mutation in MutableConfProvider. (Jonathan Hung via Xuan Gong)

Posted by xg...@apache.org.
YARN-6575. Support global configuration mutation in MutableConfProvider. (Jonathan Hung via Xuan Gong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e69113b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e69113b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e69113b4

Branch: refs/heads/YARN-5734
Commit: e69113b44fde67ad8005d556331c1ef466fceefa
Parents: fbaa345
Author: Xuan <xg...@apache.org>
Authored: Mon Jun 5 16:30:38 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 31 08:59:24 2017 -0700

----------------------------------------------------------------------
 .../ConfigurationMutationACLPolicy.java         |   4 +-
 .../DefaultConfigurationMutationACLPolicy.java  |   4 +-
 .../scheduler/MutableConfScheduler.java         |   4 +-
 .../scheduler/MutableConfigurationProvider.java |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |   4 +-
 .../conf/MutableCSConfigurationProvider.java    |  10 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |  22 +++-
 .../resourcemanager/webapp/RMWebServices.java   |   4 +-
 .../webapp/dao/QueueConfigsUpdateInfo.java      |  60 -----------
 .../webapp/dao/SchedConfUpdateInfo.java         |  69 +++++++++++++
 .../TestConfigurationMutationACLPolicies.java   |  28 ++++-
 .../TestMutableCSConfigurationProvider.java     |  10 +-
 .../TestRMWebServicesConfigurationMutation.java | 101 +++++++++++++------
 13 files changed, 205 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
index 724487b..3a388fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 /**
  * Interface for determining whether configuration mutations are allowed.
@@ -41,7 +41,7 @@ public interface ConfigurationMutationACLPolicy {
    * @param confUpdate configurations to be updated
    * @return whether provided mutation is allowed or not
    */
-  boolean isMutationAllowed(UserGroupInformation user, QueueConfigsUpdateInfo
+  boolean isMutationAllowed(UserGroupInformation user, SchedConfUpdateInfo
       confUpdate);
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
index 680c3b8..6648668 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 /**
  * Default configuration mutation ACL policy. Checks if user is YARN admin.
@@ -39,7 +39,7 @@ public class DefaultConfigurationMutationACLPolicy implements
 
   @Override
   public boolean isMutationAllowed(UserGroupInformation user,
-      QueueConfigsUpdateInfo confUpdate) {
+      SchedConfUpdateInfo confUpdate) {
     return authorizer.isAdmin(user);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
index 93a935e..027d944 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 
@@ -36,7 +36,7 @@ public interface MutableConfScheduler extends ResourceScheduler {
    * @throws IOException if update is invalid
    */
   void updateConfiguration(UserGroupInformation user,
-      QueueConfigsUpdateInfo confUpdate) throws IOException;
+      SchedConfUpdateInfo confUpdate) throws IOException;
 
   /**
    * Get the scheduler configuration.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index f04c128..6b8306c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 
@@ -34,7 +34,7 @@ public interface MutableConfigurationProvider {
    * @param confUpdate Key-value pairs for configurations to be updated.
    * @throws IOException if scheduler could not be reinitialized
    */
-  void mutateConfiguration(UserGroupInformation user, QueueConfigsUpdateInfo
+  void mutateConfiguration(UserGroupInformation user, SchedConfUpdateInfo
       confUpdate) throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 5bcb352..6f637a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -137,7 +137,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -2519,7 +2519,7 @@ public class CapacityScheduler extends
 
   @Override
   public void updateConfiguration(UserGroupInformation user,
-      QueueConfigsUpdateInfo confUpdate) throws IOException {
+      SchedConfUpdateInfo confUpdate) throws IOException {
     if (csConfProvider instanceof MutableConfigurationProvider) {
       ((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
           user, confUpdate);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 8b879b0..eb97260 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -98,7 +98,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
 
   @Override
   public void mutateConfiguration(UserGroupInformation user,
-      QueueConfigsUpdateInfo confUpdate) throws IOException {
+      SchedConfUpdateInfo confUpdate) throws IOException {
     if (!aclMutationPolicy.isMutationAllowed(user, confUpdate)) {
       throw new AccessControlException("User is not admin of all modified" +
           " queues.");
@@ -126,7 +126,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
 
 
   private Map<String, String> constructKeyValueConfUpdate(
-      QueueConfigsUpdateInfo mutationInfo) throws IOException {
+      SchedConfUpdateInfo mutationInfo) throws IOException {
     CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
     CapacitySchedulerConfiguration proposedConf =
         new CapacitySchedulerConfiguration(cs.getConfiguration(), false);
@@ -140,6 +140,10 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
     for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
       updateQueue(updateQueueInfo, proposedConf, confUpdate);
     }
+    for (Map.Entry<String, String> global : mutationInfo.getGlobalParams()
+        .entrySet()) {
+      confUpdate.put(global.getKey(), global.getValue());
+    }
     return confUpdate;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
index 1f94c1c..0a82d50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
@@ -22,15 +22,17 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
 /**
@@ -40,16 +42,29 @@ import java.util.Set;
 public class QueueAdminConfigurationMutationACLPolicy implements
     ConfigurationMutationACLPolicy {
 
+  private Configuration conf;
   private RMContext rmContext;
+  private YarnAuthorizationProvider authorizer;
 
   @Override
-  public void init(Configuration conf, RMContext context) {
+  public void init(Configuration config, RMContext context) {
+    this.conf = config;
     this.rmContext = context;
+    this.authorizer = YarnAuthorizationProvider.getInstance(conf);
   }
 
   @Override
   public boolean isMutationAllowed(UserGroupInformation user,
-      QueueConfigsUpdateInfo confUpdate) {
+      SchedConfUpdateInfo confUpdate) {
+    // If there are global config changes, check if user is admin.
+    Map<String, String> globalParams = confUpdate.getGlobalParams();
+    if (globalParams != null && globalParams.size() != 0) {
+      if (!authorizer.isAdmin(user)) {
+        return false;
+      }
+    }
+
+    // Check if user is admin of all modified queues.
     Set<String> queues = new HashSet<>();
     for (QueueConfigInfo addQueueInfo : confUpdate.getAddQueueInfo()) {
       queues.add(addQueueInfo.getQueue());
@@ -71,7 +86,6 @@ public class QueueAdminConfigurationMutationACLPolicy implements
         // Queue is not found, do nothing.
       }
       String parentPath = queuePath;
-      // TODO: handle global config change.
       while (queueInfo == null) {
         // We are adding a queue (whose parent we are possibly also adding).
         // Check ACL of lowest parent queue which already exists.
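
The effect of the new branch above, as a hedged usage sketch (the Configuration and RMContext are assumed to come from a live ResourceManager; the configuration key is only an example):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
  import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
  import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;

  public class GlobalAclSketch {
    static boolean canMutate(Configuration conf, RMContext rmContext) {
      SchedConfUpdateInfo update = new SchedConfUpdateInfo();
      // Any global key routes the decision through the admin check.
      update.getGlobalParams().put(
          "yarn.scheduler.capacity.maximum-applications", "5000");

      ConfigurationMutationACLPolicy policy =
          new QueueAdminConfigurationMutationACLPolicy();
      policy.init(conf, rmContext);

      UserGroupInformation caller =
          UserGroupInformation.createRemoteUser("alice");
      // True only if 'alice' is a YARN admin; queue-only updates would
      // instead fall through to the per-queue ACL walk in this hunk.
      return policy.isMutationAllowed(caller, update);
    }
  }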

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index d670748..ae1ebad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -2409,11 +2409,11 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   }
 
   @PUT
-  @Path("/queues")
+  @Path("/sched-conf")
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  public Response updateSchedulerConfiguration(QueueConfigsUpdateInfo
+  public Response updateSchedulerConfiguration(SchedConfUpdateInfo
       mutationInfo, @Context HttpServletRequest hsr)
       throws AuthorizationException, InterruptedException {
     init();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
deleted file mode 100644
index 644ec90..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
-
-import java.util.ArrayList;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Information for making scheduler configuration changes (supports adding,
- * removing, or updating a queue).
- */
-@XmlRootElement(name = "schedConf")
-@XmlAccessorType(XmlAccessType.FIELD)
-public class QueueConfigsUpdateInfo {
-
-  @XmlElement(name = "add")
-  private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
-
-  @XmlElement(name = "remove")
-  private ArrayList<String> removeQueueInfo = new ArrayList<>();
-
-  @XmlElement(name = "update")
-  private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
-
-  public QueueConfigsUpdateInfo() {
-    // JAXB needs this
-  }
-
-  public ArrayList<QueueConfigInfo> getAddQueueInfo() {
-    return addQueueInfo;
-  }
-
-  public ArrayList<String> getRemoveQueueInfo() {
-    return removeQueueInfo;
-  }
-
-  public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
-    return updateQueueInfo;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
new file mode 100644
index 0000000..b7c585e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for making scheduler configuration changes (supports adding,
+ * removing, or updating a queue, as well as global scheduler conf changes).
+ */
+@XmlRootElement(name = "schedConf")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class SchedConfUpdateInfo {
+
+  @XmlElement(name = "add-queue")
+  private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
+
+  @XmlElement(name = "remove-queue")
+  private ArrayList<String> removeQueueInfo = new ArrayList<>();
+
+  @XmlElement(name = "update-queue")
+  private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
+
+  private HashMap<String, String> global = new HashMap<>();
+
+  public SchedConfUpdateInfo() {
+    // JAXB needs this
+  }
+
+  public ArrayList<QueueConfigInfo> getAddQueueInfo() {
+    return addQueueInfo;
+  }
+
+  public ArrayList<String> getRemoveQueueInfo() {
+    return removeQueueInfo;
+  }
+
+  public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
+    return updateQueueInfo;
+  }
+
+  @XmlElementWrapper(name = "global-updates")
+  public HashMap<String, String> getGlobalParams() {
+    return global;
+  }
+}
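
A quick sketch of how this new DAO is meant to be filled in, mirroring the test code later in this message (the queue path "root.a" and the property values are illustrative assumptions, not part of the commit):

    // Hypothetical usage sketch; assumes java.util.Map/HashMap plus the
    // QueueConfigInfo DAO from this package.
    Map<String, String> params = new HashMap<>();
    params.put(CapacitySchedulerConfiguration.CAPACITY, "50");
    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
    // Per-queue change: update root.a's capacity.
    updateInfo.getUpdateQueueInfo().add(new QueueConfigInfo("root.a", params));
    // Cluster-wide change: entries in the global-updates map are keyed by
    // full property names, e.g. CapacitySchedulerConfiguration.PREFIX +
    // "maximum-applications".
    updateInfo.getGlobalParams().put(
        CapacitySchedulerConfiguration.PREFIX + "maximum-applications", "30000");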

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
index 4016dcf..0f5a3d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -77,6 +77,7 @@ public class TestConfigurationMutationACLPolicies {
         .thenReturn(false);
     when(scheduler.getQueue(eq(queueName))).thenReturn(queue);
   }
+
   @Test
   public void testDefaultPolicy() {
     Configuration conf = new Configuration();
@@ -98,7 +99,7 @@ public class TestConfigurationMutationACLPolicies {
         ConfigurationMutationACLPolicy.class);
     policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
     policy.init(conf, rmContext);
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     QueueConfigInfo configInfo = new QueueConfigInfo("root.a", EMPTY_MAP);
     updateInfo.getUpdateQueueInfo().add(configInfo);
     assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
@@ -114,7 +115,7 @@ public class TestConfigurationMutationACLPolicies {
     policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
     policy.init(conf, rmContext);
     // Add root.b.b1. Should check ACL of root.b queue.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2", EMPTY_MAP);
     updateInfo.getAddQueueInfo().add(configInfo);
     assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
@@ -130,7 +131,7 @@ public class TestConfigurationMutationACLPolicies {
     policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
     policy.init(conf, rmContext);
     // Add root.b.b1.b11. Should check ACL of root.b queue.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2.b21", EMPTY_MAP);
     updateInfo.getAddQueueInfo().add(configInfo);
     assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
@@ -146,9 +147,26 @@ public class TestConfigurationMutationACLPolicies {
     policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
     policy.init(conf, rmContext);
     // Remove root.b.b1.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     updateInfo.getRemoveQueueInfo().add("root.b.b1");
     assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
     assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
   }
+
+  @Test
+  public void testQueueAdminPolicyGlobal() {
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.YARN_ADMIN_ACL, GOOD_USER.getShortUserName());
+    conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+        QueueAdminConfigurationMutationACLPolicy.class,
+        ConfigurationMutationACLPolicy.class);
+    policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+    policy.init(conf, rmContext);
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertTrue(policy.isMutationAllowed(BAD_USER, updateInfo));
+    updateInfo.getGlobalParams().put("globalKey", "globalValue");
+    assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+    assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+  }
 }
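
The new testQueueAdminPolicyGlobal case pins down the intended contract: under QueueAdminConfigurationMutationACLPolicy, an update carrying no global entries is decided by queue ACLs alone, while any entry in global-updates additionally requires the caller to pass the YARN admin ACL. Roughly (hypothetical helper name; the real policy code may differ in detail):

    // inside isMutationAllowed(UserGroupInformation user, SchedConfUpdateInfo info)
    if (!info.getGlobalParams().isEmpty() && !isYarnAdmin(user)) {
      return false; // global scheduler settings require the YARN admin ACL
    }
    // Per-queue entries are then checked against ADMINISTER_QUEUE on the
    // affected queue (or its parent, for add/remove -- see the cases above).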

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 13229b1..3216781 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -47,8 +47,8 @@ public class TestMutableCSConfigurationProvider {
 
   private MutableCSConfigurationProvider confProvider;
   private RMContext rmContext;
-  private QueueConfigsUpdateInfo goodUpdate;
-  private QueueConfigsUpdateInfo badUpdate;
+  private SchedConfUpdateInfo goodUpdate;
+  private SchedConfUpdateInfo badUpdate;
   private CapacityScheduler cs;
 
   private static final UserGroupInformation TEST_USER = UserGroupInformation
@@ -62,14 +62,14 @@ public class TestMutableCSConfigurationProvider {
     when(cs.getConfiguration()).thenReturn(
         new CapacitySchedulerConfiguration());
     confProvider = new MutableCSConfigurationProvider(rmContext);
-    goodUpdate = new QueueConfigsUpdateInfo();
+    goodUpdate = new SchedConfUpdateInfo();
     Map<String, String> goodUpdateMap = new HashMap<>();
     goodUpdateMap.put("goodKey", "goodVal");
     QueueConfigInfo goodUpdateInfo = new
         QueueConfigInfo("root.a", goodUpdateMap);
     goodUpdate.getUpdateQueueInfo().add(goodUpdateInfo);
 
-    badUpdate = new QueueConfigsUpdateInfo();
+    badUpdate = new SchedConfUpdateInfo();
     Map<String, String> badUpdateMap = new HashMap<>();
     badUpdateMap.put("badKey", "badVal");
     QueueConfigInfo badUpdateInfo = new

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e69113b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index d149055..5fbe36f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -162,7 +162,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     ClientResponse response;
 
     // Add parent queue root.d with two children d1 and d2.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     Map<String, String> d1Capacity = new HashMap<>();
     d1Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
     d1Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "25");
@@ -181,9 +181,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     updateInfo.getAddQueueInfo().add(d);
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
@@ -205,7 +205,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     ClientResponse response;
 
     // Add root.d with capacity 25, reducing root.b capacity from 75 to 50.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     Map<String, String> dCapacity = new HashMap<>();
     dCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
     Map<String, String> bCapacity = new HashMap<>();
@@ -216,9 +216,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     updateInfo.getUpdateQueueInfo().add(b);
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
@@ -238,13 +238,13 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
 
     stopQueue("root.a.a2");
     // Remove root.a.a2
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     updateInfo.getRemoveQueueInfo().add("root.a.a2");
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
@@ -263,13 +263,13 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
 
     stopQueue("root.c", "root.c.c1");
     // Remove root.c (parent queue)
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     updateInfo.getRemoveQueueInfo().add("root.c");
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
@@ -288,7 +288,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
 
     stopQueue("root.a", "root.a.a1", "root.a.a2");
     // Remove root.a (parent queue) with capacity 25
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     updateInfo.getRemoveQueueInfo().add("root.a");
 
     // Set root.b capacity to 100
@@ -298,9 +298,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     updateInfo.getUpdateQueueInfo().add(b);
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
@@ -320,7 +320,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
 
     stopQueue("root.b", "root.c", "root.c.c1");
     // Remove root.b and root.c
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     updateInfo.getRemoveQueueInfo().add("root.b");
     updateInfo.getRemoveQueueInfo().add("root.c");
     Map<String, String> aCapacity = new HashMap<>();
@@ -330,9 +330,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     updateInfo.getUpdateQueueInfo().add(configInfo);
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
 
@@ -348,7 +348,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     ClientResponse response;
 
     // Set state of queues to STOPPED.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     Map<String, String> stoppedParam = new HashMap<>();
     stoppedParam.put(CapacitySchedulerConfiguration.STATE,
         QueueState.STOPPED.toString());
@@ -358,9 +358,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     }
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -378,7 +378,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     ClientResponse response;
 
     // Update config value.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     Map<String, String> updateParam = new HashMap<>();
     updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
         "0.2");
@@ -393,9 +393,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
         0.001f);
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -411,9 +411,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -431,7 +431,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     ClientResponse response;
 
     // Update root.a and root.b capacity to 50.
-    QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     Map<String, String> updateParam = new HashMap<>();
     updateParam.put(CapacitySchedulerConfiguration.CAPACITY, "50");
     QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
@@ -441,9 +441,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
 
     response =
         r.path("ws").path("v1").path("cluster")
-            .path("queues").queryParam("user.name", userName)
+            .path("sched-conf").queryParam("user.name", userName)
             .accept(MediaType.APPLICATION_JSON)
-            .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
                 MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -453,6 +453,47 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
     assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
   }
 
+  @Test
+  public void testGlobalConfChange() throws Exception {
+    WebResource r = resource();
+
+    ClientResponse response;
+
+    // Set maximum-applications to 30000.
+    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
+    updateInfo.getGlobalParams().put(CapacitySchedulerConfiguration.PREFIX +
+        "maximum-applications", "30000");
+
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("sched-conf").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(30000, newCSConf.getMaximumSystemApplications());
+
+    updateInfo.getGlobalParams().put(CapacitySchedulerConfiguration.PREFIX +
+        "maximum-applications", null);
+    // Unset maximum-applications. Should be set to default.
+    response =
+        r.path("ws").path("v1").path("cluster")
+            .path("sched-conf").queryParam("user.name", userName)
+            .accept(MediaType.APPLICATION_JSON)
+            .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
+                MediaType.APPLICATION_JSON)
+            .put(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    newCSConf =
+        ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+    assertEquals(CapacitySchedulerConfiguration
+        .DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS,
+        newCSConf.getMaximumSystemApplications());
+  }
+
   @Override
   @After
   public void tearDown() throws Exception {
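
Taken together with the endpoint rename, clients now PUT their SchedConfUpdateInfo to ws/v1/cluster/sched-conf instead of ws/v1/cluster/queues. A minimal client-side sketch following the Jersey pattern the tests above use (resource(), userName, and toJson() are test-harness pieces, shown here only for shape):

    ClientResponse response = resource()
        .path("ws").path("v1").path("cluster").path("sched-conf")
        .queryParam("user.name", userName)
        .accept(MediaType.APPLICATION_JSON)
        .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
            MediaType.APPLICATION_JSON)
        .put(ClientResponse.class);
    // A null value for a global key unsets that property, reverting it to its
    // default (exercised by the maximum-applications round trip above).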




[22/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
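
For readers skimming the diffs below: JDK 9 makes the single underscore `_` a reserved keyword, so Hamlet's `_()` element-closing method and the `HTML<_>` type parameter no longer compile there. This commit adds a parallel org.apache.hadoop.yarn.webapp.hamlet2 package in which every `_` becomes `__`, and migrates callers. The mechanical shape of the change, as seen throughout the hunks:

    // old (org.apache.hadoop.yarn.webapp.hamlet):
    html.h3()._("ResourceManager")._();
    // new (org.apache.hadoop.yarn.webapp.hamlet2):
    html.h3().__("ResourceManager").__();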
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java
index 7475c4d..5e81ed5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java
@@ -32,7 +32,7 @@ public class NMErrorsAndWarningsPage extends NMView {
   }
 
   @Override
-  protected void preHead(HtmlPage.Page.HTML<HtmlPage._> html) {
+  protected void preHead(HtmlPage.Page.HTML<__> html) {
     commonPreHead(html);
     String title = "Errors and Warnings in the NodeManager";
     setTitle(title);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java
index dc21b4a..a76d2ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
 
 public class NMView extends TwoColumnLayout {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
       commonPreHead(html);
     }
 
-  protected void commonPreHead(Page.HTML<_> html) {
+  protected void commonPreHead(Page.HTML<__> html) {
     set(ACCORDION_ID, "nav");
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
index 857a4f9..0a2731e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -24,7 +24,7 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
@@ -55,29 +55,29 @@ public class NavBlock extends HtmlBlock implements YarnWebParams {
         WebAppUtils.getResolvedRMWebAppURLWithScheme(this.conf);
 	  Hamlet.UL<Hamlet.DIV<Hamlet>> ul = html
       .div("#nav")
-      .h3()._("ResourceManager")._()
+      .h3().__("ResourceManager").__()
         .ul()
-          .li().a(RMWebAppURL, "RM Home")._()._()
-      .h3()._("NodeManager")._() // TODO: Problem if no header like this
+          .li().a(RMWebAppURL, "RM Home").__().__()
+      .h3().__("NodeManager").__() // TODO: Problem if no header like this
         .ul()
           .li()
-            .a(url("node"), "Node Information")._()
+            .a(url("node"), "Node Information").__()
           .li()
             .a(url("allApplications"), "List of Applications")
-            ._()
+            .__()
           .li()
-            .a(url("allContainers"), "List of Containers")._()
-        ._()
+            .a(url("allContainers"), "List of Containers").__()
+        .__()
       .h3("Tools")
         .ul()
-          .li().a("/conf", "Configuration")._()
-          .li().a("/logs", "Local logs")._()
-          .li().a("/stacks", "Server stacks")._()
-          .li().a("/jmx?qry=Hadoop:*", "Server metrics")._();
+          .li().a("/conf", "Configuration").__()
+          .li().a("/logs", "Local logs").__()
+          .li().a("/stacks", "Server stacks").__()
+          .li().a("/jmx?qry=Hadoop:*", "Server metrics").__();
     if (addErrorsAndWarningsLink) {
-      ul.li().a(url("errors-and-warnings"), "Errors/Warnings")._();
+      ul.li().a(url("errors-and-warnings"), "Errors/Warnings").__();
     }
-    ul._()._();
+    ul.__().__();
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
index f51f0c5..7005f41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.HTML;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -39,7 +39,7 @@ public class NodePage extends NMView {
   private static final long BYTES_IN_MB = 1024 * 1024;
 
   @Override
-  protected void commonPreHead(HTML<_> html) {
+  protected void commonPreHead(HTML<__> html) {
     super.commonPreHead(html);
 
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
@@ -65,29 +65,29 @@ public class NodePage extends NMView {
     protected void render(Block html) {
       NodeInfo info = new NodeInfo(this.context, this.resourceView);
       info("NodeManager information")
-          ._("Total Vmem allocated for Containers",
+          .__("Total Vmem allocated for Containers",
               StringUtils.byteDesc(info.getTotalVmemAllocated() * BYTES_IN_MB))
-          ._("Vmem enforcement enabled",
+          .__("Vmem enforcement enabled",
               info.isVmemCheckEnabled())
-          ._("Total Pmem allocated for Container",
+          .__("Total Pmem allocated for Container",
               StringUtils.byteDesc(info.getTotalPmemAllocated() * BYTES_IN_MB))
-          ._("Pmem enforcement enabled",
+          .__("Pmem enforcement enabled",
               info.isPmemCheckEnabled())
-          ._("Total VCores allocated for Containers",
+          .__("Total VCores allocated for Containers",
               String.valueOf(info.getTotalVCoresAllocated()))
-          ._("NodeHealthyStatus",
+          .__("NodeHealthyStatus",
               info.getHealthStatus())
-          ._("LastNodeHealthTime", new Date(
+          .__("LastNodeHealthTime", new Date(
               info.getLastNodeUpdateTime()))
-          ._("NodeHealthReport",
+          .__("NodeHealthReport",
               info.getHealthReport())
-          ._("NodeManager started on", new Date(
+          .__("NodeManager started on", new Date(
               info.getNMStartupTime()))
-          ._("NodeManager Version:", info.getNMBuildVersion() +
+          .__("NodeManager Version:", info.getNMBuildVersion() +
               " on " + info.getNMVersionBuiltOn())
-          ._("Hadoop Version:", info.getHadoopBuildVersion() +
+          .__("Hadoop Version:", info.getHadoopBuildVersion() +
               " on " + info.getHadoopVersionBuiltOn());
-      html._(InfoBlock.class);
+      html.__(InfoBlock.class);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
index 4225afd..b7ce105 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
@@ -37,22 +37,22 @@ public class AboutBlock extends HtmlBlock {
 
   @Override
   protected void render(Block html) {
-    html._(MetricsOverviewTable.class);
+    html.__(MetricsOverviewTable.class);
     ResourceManager rm = getInstance(ResourceManager.class);
     ClusterInfo cinfo = new ClusterInfo(rm);
     info("Cluster overview").
-      _("Cluster ID:", cinfo.getClusterId()).
-      _("ResourceManager state:", cinfo.getState()).
-      _("ResourceManager HA state:", cinfo.getHAState()).
-      _("ResourceManager HA zookeeper connection state:",
+        __("Cluster ID:", cinfo.getClusterId()).
+        __("ResourceManager state:", cinfo.getState()).
+        __("ResourceManager HA state:", cinfo.getHAState()).
+        __("ResourceManager HA zookeeper connection state:",
           cinfo.getHAZookeeperConnectionState()).
-      _("ResourceManager RMStateStore:", cinfo.getRMStateStore()).
-      _("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
-      _("ResourceManager version:", cinfo.getRMBuildVersion() +
+        __("ResourceManager RMStateStore:", cinfo.getRMStateStore()).
+        __("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
+        __("ResourceManager version:", cinfo.getRMBuildVersion() +
           " on " + cinfo.getRMVersionBuiltOn()).
-      _("Hadoop version:", cinfo.getHadoopBuildVersion() +
+        __("Hadoop version:", cinfo.getHadoopBuildVersion() +
           " on " + cinfo.getHadoopVersionBuiltOn());
-    html._(InfoBlock.class);
+    html.__(InfoBlock.class);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java
index ef0fdcf..f8c0406 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.yarn.webapp.SubView;
 
 public class AboutPage extends RmView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
index 45f1887..89e2dec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 public class AppAttemptPage extends RmView {
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java
index ccb53dd..27fb43a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 public class AppLogAggregationStatusPage extends RmView{
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     String appId = $(YarnWebParams.APPLICATION_ID);
     set(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
index 0c5516a..7036f33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 public class AppPage extends RmView {
 
   @Override 
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     String appId = $(YarnWebParams.APPLICATION_ID);
     set(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
index 29889ec..fac100f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 /**
@@ -26,7 +25,7 @@ import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
  */
 class AppsBlockWithMetrics extends HtmlBlock {
   @Override public void render(Block html) {
-    html._(MetricsOverviewTable.class);
-    html._(RMAppsBlock.class);
+    html.__(MetricsOverviewTable.class);
+    html.__(RMAppsBlock.class);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 292c5f3..f3ab5b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -47,12 +47,12 @@ import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -105,7 +105,7 @@ class CapacitySchedulerPage extends RmView {
           info("\'" + lqinfo.getQueuePath().substring(5)
               + "\' Queue Status for Partition \'" + nodeLabelDisplay + "\'");
       renderQueueCapacityInfo(ri, nodeLabel);
-      html._(InfoBlock.class);
+      html.__(InfoBlock.class);
       // clear the info contents so this queue's info doesn't accumulate into
       // another queue's info
       ri.clear();
@@ -113,10 +113,10 @@ class CapacitySchedulerPage extends RmView {
       // second display the queue specific details :
       ri =
           info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status")
-              ._("Queue State:", lqinfo.getQueueState());
+              .__("Queue State:", lqinfo.getQueueState());
       renderCommonLeafQueueInfo(ri);
 
-      html._(InfoBlock.class);
+      html.__(InfoBlock.class);
       // clear the info contents so this queue's info doesn't accumulate into
       // another queue's info
       ri.clear();
@@ -125,10 +125,10 @@ class CapacitySchedulerPage extends RmView {
     private void renderLeafQueueInfoWithoutParition(Block html) {
       ResponseInfo ri =
           info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status")
-              ._("Queue State:", lqinfo.getQueueState());
+              .__("Queue State:", lqinfo.getQueueState());
       renderQueueCapacityInfo(ri, "");
       renderCommonLeafQueueInfo(ri);
-      html._(InfoBlock.class);
+      html.__(InfoBlock.class);
       // clear the info contents so this queue's info doesn't accumulate into
       // another queue's info
       ri.clear();
@@ -155,40 +155,40 @@ class CapacitySchedulerPage extends RmView {
           ? new ResourceInfo(Resources.none())
           : resourceUsages.getAmUsed();
       ri.
-      _("Used Capacity:", percent(capacities.getUsedCapacity() / 100)).
-      _("Configured Capacity:", percent(capacities.getCapacity() / 100)).
-      _("Configured Max Capacity:", percent(capacities.getMaxCapacity() / 100)).
-      _("Absolute Used Capacity:", percent(capacities.getAbsoluteUsedCapacity() / 100)).
-      _("Absolute Configured Capacity:", percent(capacities.getAbsoluteCapacity() / 100)).
-      _("Absolute Configured Max Capacity:", percent(capacities.getAbsoluteMaxCapacity() / 100)).
-      _("Used Resources:", resourceUsages.getUsed().toString()).
-      _("Configured Max Application Master Limit:", StringUtils.format("%.1f",
+          __("Used Capacity:", percent(capacities.getUsedCapacity() / 100)).
+          __("Configured Capacity:", percent(capacities.getCapacity() / 100)).
+          __("Configured Max Capacity:", percent(capacities.getMaxCapacity() / 100)).
+          __("Absolute Used Capacity:", percent(capacities.getAbsoluteUsedCapacity() / 100)).
+          __("Absolute Configured Capacity:", percent(capacities.getAbsoluteCapacity() / 100)).
+          __("Absolute Configured Max Capacity:", percent(capacities.getAbsoluteMaxCapacity() / 100)).
+          __("Used Resources:", resourceUsages.getUsed().toString()).
+          __("Configured Max Application Master Limit:", StringUtils.format("%.1f",
           capacities.getMaxAMLimitPercentage())).
-      _("Max Application Master Resources:",
+          __("Max Application Master Resources:",
           resourceUsages.getAMLimit().toString()).
-      _("Used Application Master Resources:",
+          __("Used Application Master Resources:",
           amUsed.toString()).
-      _("Max Application Master Resources Per User:",
+          __("Max Application Master Resources Per User:",
           userAMResourceLimit.toString());
     }
 
     private void renderCommonLeafQueueInfo(ResponseInfo ri) {
       ri.
-      _("Num Schedulable Applications:", Integer.toString(lqinfo.getNumActiveApplications())).
-      _("Num Non-Schedulable Applications:", Integer.toString(lqinfo.getNumPendingApplications())).
-      _("Num Containers:", Integer.toString(lqinfo.getNumContainers())).
-      _("Max Applications:", Integer.toString(lqinfo.getMaxApplications())).
-      _("Max Applications Per User:", Integer.toString(lqinfo.getMaxApplicationsPerUser())).
-      _("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%").
-      _("Configured User Limit Factor:", lqinfo.getUserLimitFactor()).
-      _("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())).
-      _("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()).
-      _("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
-      _("Default Node Label Expression:",
+          __("Num Schedulable Applications:", Integer.toString(lqinfo.getNumActiveApplications())).
+          __("Num Non-Schedulable Applications:", Integer.toString(lqinfo.getNumPendingApplications())).
+          __("Num Containers:", Integer.toString(lqinfo.getNumContainers())).
+          __("Max Applications:", Integer.toString(lqinfo.getMaxApplications())).
+          __("Max Applications Per User:", Integer.toString(lqinfo.getMaxApplicationsPerUser())).
+          __("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%").
+          __("Configured User Limit Factor:", lqinfo.getUserLimitFactor()).
+          __("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())).
+          __("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()).
+          __("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
+          __("Default Node Label Expression:",
               lqinfo.getDefaultNodeLabelExpression() == null
                   ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION
                   : lqinfo.getDefaultNodeLabelExpression()).
-      _("Default Application Priority:",
+          __("Default Application Priority:",
               Integer.toString(lqinfo.getDefaultApplicationPriority()));
     }
   }
@@ -208,14 +208,14 @@ class CapacitySchedulerPage extends RmView {
     protected void render(Block html) {
       TBODY<TABLE<Hamlet>> tbody =
           html.table("#userinfo").thead().$class("ui-widget-header").tr().th()
-              .$class("ui-state-default")._("User Name")._().th()
-              .$class("ui-state-default")._("Max Resource")._().th()
-              .$class("ui-state-default")._("Weight")._().th()
-              .$class("ui-state-default")._("Used Resource")._().th()
-              .$class("ui-state-default")._("Max AM Resource")._().th()
-              .$class("ui-state-default")._("Used AM Resource")._().th()
-              .$class("ui-state-default")._("Schedulable Apps")._().th()
-              .$class("ui-state-default")._("Non-Schedulable Apps")._()._()._()
+              .$class("ui-state-default").__("User Name").__().th()
+              .$class("ui-state-default").__("Max Resource").__().th()
+              .$class("ui-state-default").__("Weight").__().th()
+              .$class("ui-state-default").__("Used Resource").__().th()
+              .$class("ui-state-default").__("Max AM Resource").__().th()
+              .$class("ui-state-default").__("Used AM Resource").__().th()
+              .$class("ui-state-default").__("Schedulable Apps").__().th()
+              .$class("ui-state-default").__("Non-Schedulable Apps").__().__().__()
               .tbody();
 
       ArrayList<UserInfo> users = lqinfo.getUsers().getUsersList();
@@ -240,11 +240,11 @@ class CapacitySchedulerPage extends RmView {
             .td(resourceUsages.getAMLimit().toString())
             .td(amUsed.toString())
             .td(Integer.toString(userInfo.getNumActiveApplications()))
-            .td(Integer.toString(userInfo.getNumPendingApplications()))._();
+            .td(Integer.toString(userInfo.getNumPendingApplications())).__();
       }
 
-      html.div().$class("usersinfo").h5("Active Users Info")._();
-      tbody._()._();
+      html.div().$class("usersinfo").h5("Active Users Info").__();
+      tbody.__().__();
     }
   }
 
@@ -288,25 +288,25 @@ class CapacitySchedulerPage extends RmView {
             a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)).
               $title(join("Absolute Capacity:", percent(absCap))).
               span().$style(join(Q_GIVEN, ";font-size:1px;", width(absCap/absMaxCap))).
-                _('.')._().
+            __('.').__().
               span().$style(join(width(absUsedCap/absMaxCap),
                 ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)).
-                _('.')._().
-              span(".q", "Queue: "+info.getQueuePath().substring(5))._().
+            __('.').__().
+              span(".q", "Queue: "+info.getQueuePath().substring(5)).__().
             span().$class("qstats").$style(left(Q_STATS_POS)).
-              _(join(percent(used), " used"))._();
+            __(join(percent(used), " used")).__();
 
         csqinfo.qinfo = info;
         if (info.getQueues() == null) {
-          li.ul("#lq").li()._(LeafQueueInfoBlock.class)._()._();
-          li.ul("#lq").li()._(QueueUsersInfoBlock.class)._()._();
+          li.ul("#lq").li().__(LeafQueueInfoBlock.class).__().__();
+          li.ul("#lq").li().__(QueueUsersInfoBlock.class).__().__();
         } else {
-          li._(QueueBlock.class);
+          li.__(QueueBlock.class);
         }
-        li._();
+        li.__();
       }
 
-      ul._();
+      ul.__();
     }
   }
 
@@ -327,7 +327,7 @@ class CapacitySchedulerPage extends RmView {
 
     @Override
     public void render(Block html) {
-      html._(MetricsOverviewTable.class);
+      html.__(MetricsOverviewTable.class);
 
       UserGroupInformation callerUGI = this.getCallerUGI();
       boolean isAdmin = false;
@@ -347,10 +347,10 @@ class CapacitySchedulerPage extends RmView {
           .$style(
               "border-style: solid; border-color: #000000; border-width: 1px;"
                   + " cursor: hand; cursor: pointer; border-radius: 4px")
-          .$onclick("confirmAction()").b("Dump scheduler logs")._().select()
-          .$id("time").option().$value("60")._("1 min")._().option()
-          .$value("300")._("5 min")._().option().$value("600")._("10 min")._()
-          ._()._();
+          .$onclick("confirmAction()").b("Dump scheduler logs").__().select()
+          .$id("time").option().$value("60").__("1 min").__().option()
+          .$value("300").__("5 min").__().option().$value("600").__("10 min").__()
+          .__().__();
 
         StringBuilder script = new StringBuilder();
         script
@@ -377,36 +377,36 @@ class CapacitySchedulerPage extends RmView {
           .append(" console.log(data);").append(" });").append(" }")
           .append("}");
 
-        html.script().$type("text/javascript")._(script.toString())._();
+        html.script().$type("text/javascript").__(script.toString()).__();
       }
 
       UL<DIV<DIV<Hamlet>>> ul = html.
         div("#cs-wrapper.ui-widget").
           div(".ui-widget-header.ui-corner-top").
-            _("Application Queues")._().
+          __("Application Queues").__().
           div("#cs.ui-widget-content.ui-corner-bottom").
             ul();
       if (cs == null) {
         ul.
           li().
             a(_Q).$style(width(Q_MAX_WIDTH)).
-              span().$style(Q_END)._("100% ")._().
-              span(".q", "default")._()._();
+              span().$style(Q_END).__("100% ").__().
+              span(".q", "default").__().__();
       } else {
         ul.
           li().$style("margin-bottom: 1em").
-            span().$style("font-weight: bold")._("Legend:")._().
+            span().$style("font-weight: bold").__("Legend:").__().
             span().$class("qlegend ui-corner-all").$style(Q_GIVEN).
-              _("Capacity")._().
+            __("Capacity").__().
             span().$class("qlegend ui-corner-all").$style(Q_UNDER).
-              _("Used")._().
+            __("Used").__().
             span().$class("qlegend ui-corner-all").$style(Q_OVER).
-              _("Used (over capacity)")._().
+            __("Used (over capacity)").__().
             span().$class("qlegend ui-corner-all ui-state-default").
-              _("Max Capacity")._().
+              __("Max Capacity").__().
             span().$class("qlegend ui-corner-all").$style(ACTIVE_USER).
-            _("Users Requesting Resources")._().
-          _();
+            __("Users Requesting Resources").__().
+          __();
 
         float used = 0;
 
@@ -433,11 +433,11 @@ class CapacitySchedulerPage extends RmView {
           ul.li().
             a(_Q).$style(width(Q_MAX_WIDTH)).
               span().$style(join(width(used), ";left:0%;",
-                  used > 1 ? Q_OVER : Q_UNDER))._(".")._().
-              span(".q", "Queue: root")._().
+                  used > 1 ? Q_OVER : Q_UNDER)).__(".").__().
+              span(".q", "Queue: root").__().
             span().$class("qstats").$style(left(Q_STATS_POS)).
-              _(join(percent(used), " used"))._().
-            _(QueueBlock.class)._();
+              __(join(percent(used), " used")).__().
+              __(QueueBlock.class).__();
         } else {
           for (RMNodeLabel label : nodeLabelsInfo) {
             csqinfo.qinfo = null;
@@ -453,29 +453,29 @@ class CapacitySchedulerPage extends RmView {
             ul.li().
             a(_Q).$style(width(Q_MAX_WIDTH)).
               span().$style(join(width(used), ";left:0%;",
-                  used > 1 ? Q_OVER : Q_UNDER))._(".")._().
-              span(".q", partitionUiTag)._().
+                  used > 1 ? Q_OVER : Q_UNDER)).__(".").__().
+              span(".q", partitionUiTag).__().
             span().$class("qstats").$style(left(Q_STATS_POS)).
-              _(join(percent(used), " used"))._()._();
+                __(join(percent(used), " used")).__().__();
 
             //for the queue hierarchy under label
             UL<Hamlet> underLabel = html.ul("#pq");
             underLabel.li().
             a(_Q).$style(width(Q_MAX_WIDTH)).
               span().$style(join(width(used), ";left:0%;",
-                  used > 1 ? Q_OVER : Q_UNDER))._(".")._().
-              span(".q", "Queue: root")._().
+                  used > 1 ? Q_OVER : Q_UNDER)).__(".").__().
+              span(".q", "Queue: root").__().
             span().$class("qstats").$style(left(Q_STATS_POS)).
-              _(join(percent(used), " used"))._().
-            _(QueueBlock.class)._()._();
+                __(join(percent(used), " used")).__().
+                __(QueueBlock.class).__().__();
           }
         }
       }
-      ul._()._().
+      ul.__().__().
       script().$type("text/javascript").
-          _("$('#cs').hide();")._()._().
-      _(RMAppsBlock.class);
-      html._(HealthBlock.class);
+          __("$('#cs').hide();").__().__().
+          __(RMAppsBlock.class);
+      html.__(HealthBlock.class);
     }
   }
 
@@ -495,13 +495,13 @@ class CapacitySchedulerPage extends RmView {
       div.h4("Aggregate scheduler counts");
       TBODY<TABLE<DIV<Hamlet>>> tbody =
           div.table("#lastrun").thead().$class("ui-widget-header").tr().th()
-            .$class("ui-state-default")._("Total Container Allocations(count)")
-            ._().th().$class("ui-state-default")
-            ._("Total Container Releases(count)")._().th()
+            .$class("ui-state-default").__("Total Container Allocations(count)")
+            .__().th().$class("ui-state-default")
+            .__("Total Container Releases(count)").__().th()
             .$class("ui-state-default")
-            ._("Total Fulfilled Reservations(count)")._().th()
-            .$class("ui-state-default")._("Total Container Preemptions(count)")
-            ._()._()._().tbody();
+            .__("Total Fulfilled Reservations(count)").__().th()
+            .$class("ui-state-default").__("Total Container Preemptions(count)")
+            .__().__().__().tbody();
       tbody
         .$class("ui-widget-content")
         .tr()
@@ -512,15 +512,15 @@ class CapacitySchedulerPage extends RmView {
           String.valueOf(cs.getRootQueueMetrics()
             .getAggegatedReleasedContainers()))
         .td(healthInfo.getAggregateFulFilledReservationsCount().toString())
-        .td(healthInfo.getAggregatePreemptionCount().toString())._()._()._();
+        .td(healthInfo.getAggregatePreemptionCount().toString()).__().__().__();
       div.h4("Last scheduler run");
       tbody =
           div.table("#lastrun").thead().$class("ui-widget-header").tr().th()
-            .$class("ui-state-default")._("Time")._().th()
-            .$class("ui-state-default")._("Allocations(count - resources)")._()
-            .th().$class("ui-state-default")._("Reservations(count - resources)")
-            ._().th().$class("ui-state-default")._("Releases(count - resources)")
-            ._()._()._().tbody();
+            .$class("ui-state-default").__("Time").__().th()
+            .$class("ui-state-default").__("Allocations(count - resources)").__()
+            .th().$class("ui-state-default").__("Reservations(count - resources)")
+            .__().th().$class("ui-state-default").__("Releases(count - resources)")
+            .__().__().__().tbody();
       tbody
         .$class("ui-widget-content")
         .tr()
@@ -533,7 +533,7 @@ class CapacitySchedulerPage extends RmView {
               + healthInfo.getResourcesReserved().toString())
         .td(
           healthInfo.getReleaseCount().toString() + " - "
-              + healthInfo.getResourcesReleased().toString())._()._()._();
+              + healthInfo.getResourcesReleased().toString()).__().__().__();
       Map<String, SchedulerHealth.DetailedInformation> info = new HashMap<>();
       info.put("Allocation", healthInfo.getLastAllocationDetails());
       info.put("Reservation", healthInfo.getLastReservationDetails());
@@ -549,10 +549,10 @@ class CapacitySchedulerPage extends RmView {
         div.h4("Last " + entry.getKey());
         tbody =
             div.table(table).thead().$class("ui-widget-header").tr().th()
-              .$class("ui-state-default")._("Time")._().th()
-              .$class("ui-state-default")._("Container Id")._().th()
-              .$class("ui-state-default")._("Node Id")._().th()
-              .$class("ui-state-default")._("Queue")._()._()._().tbody();
+              .$class("ui-state-default").__("Time").__().th()
+              .$class("ui-state-default").__("Container Id").__().th()
+              .$class("ui-state-default").__("Node Id").__().th()
+              .$class("ui-state-default").__("Queue").__().__().__().tbody();
         SchedulerHealth.DetailedInformation di = entry.getValue();
         if (di.getTimestamp() != 0) {
           containerId = di.getContainerId().toString();
@@ -561,26 +561,26 @@ class CapacitySchedulerPage extends RmView {
         }
         tbody.$class("ui-widget-content").tr()
           .td(Times.format(di.getTimestamp())).td(containerId).td(nodeId)
-          .td(queue)._()._()._();
+          .td(queue).__().__().__();
       }
-      div._();
+      div.__();
     }
   }
 
-  @Override protected void postHead(Page.HTML<_> html) {
+  @Override protected void postHead(Page.HTML<__> html) {
     html.
       style().$type("text/css").
-        _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
+        __("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
           "#cs ul { list-style: none }",
           "#cs a { font-weight: normal; margin: 2px; position: relative }",
           "#cs a span { font-weight: normal; font-size: 80% }",
           "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }",
           ".qstats { font-weight: normal; font-size: 80%; position: absolute }",
           ".qlegend { font-weight: normal; padding: 0 1em; margin: 1em }",
-          "table.info tr th {width: 50%}")._(). // to center info table
+          "table.info tr th {width: 50%}").__(). // to center info table
       script("/static/jt/jquery.jstree.js").
       script().$type("text/javascript").
-        _("$(function() {",
+        __("$(function() {",
           "  $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');",
           "  $('#cs').bind('loaded.jstree', function (e, data) {",
           "    var callback = { call:reopenQueryNodes }",
@@ -603,8 +603,8 @@ class CapacitySchedulerPage extends RmView {
           "    $('#apps').dataTable().fnFilter(q, 4, true);",
           "  });",
           "  $('#cs').show();",
-          "});")._().
-      _(SchedulerPageUtil.QueueBlockUtil.class);
+          "});").__().
+        __(SchedulerPageUtil.QueueBlockUtil.class);
   }
 
   @Override protected Class<? extends SubView> content() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
index b8cd1ad..2cd209b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 public class ContainerPage extends RmView {
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
 
     String containerId = $(YarnWebParams.CONTAINER_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
index d442064..0b0884b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
@@ -26,9 +26,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoSchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
 import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -53,16 +53,16 @@ class DefaultSchedulerPage extends RmView {
 
     @Override public void render(Block html) {
       info("\'" + sinfo.getQueueName() + "\' Queue Status").
-        _("Queue State:" , sinfo.getState()).
-        _("Minimum Queue Memory Capacity:" , Long.toString(sinfo.getMinQueueMemoryCapacity())).
-        _("Maximum Queue Memory Capacity:" , Long.toString(sinfo.getMaxQueueMemoryCapacity())).
-        _("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())).
-        _("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())).
-        _("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())).
-        _("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())).
-        _("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers()));
-
-      html._(InfoBlock.class);
+          __("Queue State:" , sinfo.getState()).
+          __("Minimum Queue Memory Capacity:" , Long.toString(sinfo.getMinQueueMemoryCapacity())).
+          __("Maximum Queue Memory Capacity:" , Long.toString(sinfo.getMaxQueueMemoryCapacity())).
+          __("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())).
+          __("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())).
+          __("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())).
+          __("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())).
+          __("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers()));
+
+      html.__(InfoBlock.class);
     }
   }
 
@@ -77,11 +77,11 @@ class DefaultSchedulerPage extends RmView {
 
     @Override
     public void render(Block html) {
-      html._(MetricsOverviewTable.class);
+      html.__(MetricsOverviewTable.class);
       UL<DIV<DIV<Hamlet>>> ul = html.
         div("#cs-wrapper.ui-widget").
           div(".ui-widget-header.ui-corner-top").
-            _("FifoScheduler Queue")._().
+          __("FifoScheduler Queue").__().
           div("#cs.ui-widget-content.ui-corner-bottom").
             ul();
 
@@ -89,8 +89,8 @@ class DefaultSchedulerPage extends RmView {
         ul.
           li().
             a(_Q).$style(width(WIDTH_F)).
-              span().$style(Q_END)._("100% ")._().
-              span(".q", "default")._()._();
+              span().$style(Q_END).__("100% ").__().
+              span(".q", "default").__().__();
       } else {
         float used = sinfo.getUsedCapacity();
         float set = sinfo.getCapacity();
@@ -99,33 +99,33 @@ class DefaultSchedulerPage extends RmView {
           li().
             a(_Q).$style(width(WIDTH_F)).
               $title(join("used:", percent(used))).
-              span().$style(Q_END)._("100%")._().
+              span().$style(Q_END).__("100%").__().
               span().$style(join(width(delta), ';', used > set ? OVER : UNDER,
-                ';', used > set ? left(set) : left(used)))._(".")._().
-              span(".q", sinfo.getQueueName())._().
-            _(QueueInfoBlock.class)._();
+                ';', used > set ? left(set) : left(used))).__(".").__().
+              span(".q", sinfo.getQueueName()).__().
+            __(QueueInfoBlock.class).__();
       }
 
-      ul._()._().
+      ul.__().__().
       script().$type("text/javascript").
-          _("$('#cs').hide();")._()._().
-      _(AppsBlock.class);
+          __("$('#cs').hide();").__().__().
+          __(AppsBlock.class);
     }
   }
 
 
-  @Override protected void postHead(Page.HTML<_> html) {
+  @Override protected void postHead(Page.HTML<__> html) {
     html.
       style().$type("text/css").
-        _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
+        __("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
           "#cs ul { list-style: none }",
           "#cs a { font-weight: normal; margin: 2px; position: relative }",
           "#cs a span { font-weight: normal; font-size: 80% }",
           "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }",
-          "table.info tr th {width: 50%}")._(). // to center info table
+          "table.info tr th {width: 50%}").__(). // to center info table
       script("/static/jt/jquery.jstree.js").
       script().$type("text/javascript").
-        _("$(function() {",
+        __("$(function() {",
           "  $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');",
           "  $('#cs').bind('loaded.jstree', function (e, data) {",
           "    data.inst.open_all(); }).",
@@ -142,7 +142,7 @@ class DefaultSchedulerPage extends RmView {
           "    $('#apps').dataTable().fnFilter(q, 4);",
           "  });",
           "  $('#cs').show();",
-          "});")._();
+          "});").__();
   }
 
   @Override protected Class<? extends SubView> content() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
index 963e53f..6fe5c3a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
@@ -34,6 +34,6 @@ public class ErrorBlock extends HtmlBlock {
 
   @Override
   protected void render(Block html) {
-    html.p()._($(ERROR_MESSAGE))._();
+    html.p().__($(ERROR_MESSAGE)).__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
index b7a7a93..ac88f86 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
@@ -41,9 +41,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
@@ -98,7 +98,7 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
             th(".reservedCpu", "Reserved CPU VCores").
             th(".reservedMemory", "Reserved Memory MB").
             th(".progress", "Progress").
-            th(".ui", "Tracking UI")._()._().
+            th(".ui", "Tracking UI").__().__().
         tbody();
     Collection<YarnApplicationState> reqAppStates = null;
     String reqStateString = $(APP_STATE);
@@ -168,8 +168,8 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
     }
     appsTableData.append("]");
     html.script().$type("text/javascript").
-    _("var appsTableData=" + appsTableData)._();
+        __("var appsTableData=" + appsTableData).__();
 
-    tbody._()._();
+    tbody.__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
index 5f46841..ffa4594 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
@@ -31,10 +31,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerQue
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -70,21 +70,21 @@ public class FairSchedulerPage extends RmView {
     @Override
     protected void render(Block html) {
       ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status").
-          _("Used Resources:", qinfo.getUsedResources().toString()).
-          _("Demand Resources:", qinfo.getDemandResources().toString()).
-          _("Num Active Applications:", qinfo.getNumActiveApplications()).
-          _("Num Pending Applications:", qinfo.getNumPendingApplications()).
-          _("Min Resources:", qinfo.getMinResources().toString()).
-          _("Max Resources:", qinfo.getMaxResources().toString()).
-          _("Reserved Resources:", qinfo.getReservedResources().toString());
+          __("Used Resources:", qinfo.getUsedResources().toString()).
+          __("Demand Resources:", qinfo.getDemandResources().toString()).
+          __("Num Active Applications:", qinfo.getNumActiveApplications()).
+          __("Num Pending Applications:", qinfo.getNumPendingApplications()).
+          __("Min Resources:", qinfo.getMinResources().toString()).
+          __("Max Resources:", qinfo.getMaxResources().toString()).
+          __("Reserved Resources:", qinfo.getReservedResources().toString());
       int maxApps = qinfo.getMaxApplications();
       if (maxApps < Integer.MAX_VALUE) {
-          ri._("Max Running Applications:", qinfo.getMaxApplications());
+        ri.__("Max Running Applications:", qinfo.getMaxApplications());
       }
-      ri._(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString());
-      ri._(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString());
-      ri._("Preemptable:", qinfo.isPreemptable());
-      html._(InfoBlock.class);
+      ri.__(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString());
+      ri.__(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString());
+      ri.__("Preemptable:", qinfo.isPreemptable());
+      html.__(InfoBlock.class);
 
       // clear the info contents so this queue's info doesn't accumulate into another queue's info
       ri.clear();
@@ -102,17 +102,17 @@ public class FairSchedulerPage extends RmView {
     @Override
     protected void render(Block html) {
       ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status").
-          _("Used Resources:", qinfo.getUsedResources().toString()).
-          _("Min Resources:", qinfo.getMinResources().toString()).
-          _("Max Resources:", qinfo.getMaxResources().toString()).
-          _("Reserved Resources:", qinfo.getReservedResources().toString());
+          __("Used Resources:", qinfo.getUsedResources().toString()).
+          __("Min Resources:", qinfo.getMinResources().toString()).
+          __("Max Resources:", qinfo.getMaxResources().toString()).
+          __("Reserved Resources:", qinfo.getReservedResources().toString());
       int maxApps = qinfo.getMaxApplications();
       if (maxApps < Integer.MAX_VALUE) {
-          ri._("Max Running Applications:", qinfo.getMaxApplications());
+        ri.__("Max Running Applications:", qinfo.getMaxApplications());
       }
-      ri._(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString());
-      ri._(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString());
-      html._(InfoBlock.class);
+      ri.__(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString());
+      ri.__(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString());
+      html.__(InfoBlock.class);
 
       // clear the info contents so this queue's info doesn't accumulate into another queue's info
       ri.clear();
@@ -141,28 +141,28 @@ public class FairSchedulerPage extends RmView {
               $title(join(join(STEADY_FAIR_SHARE + ":", percent(steadyFairShare)),
                   join(" " + INSTANTANEOUS_FAIR_SHARE + ":", percent(instantaneousFairShare)))).
               span().$style(join(Q_GIVEN, ";font-size:1px;", width(steadyFairShare / capacity))).
-                _('.')._().
+            __('.').__().
               span().$style(join(Q_INSTANTANEOUS_FS, ";font-size:1px;",
                   width(instantaneousFairShare/capacity))).
-                _('.')._().
+            __('.').__().
               span().$style(join(width(used/capacity),
                 ";font-size:1px;left:0%;", used > instantaneousFairShare ? Q_OVER : Q_UNDER)).
-                _('.')._().
-              span(".q", info.getQueueName())._().
+            __('.').__().
+              span(".q", info.getQueueName()).__().
             span().$class("qstats").$style(left(Q_STATS_POS)).
-              _(join(percent(used), " used"))._();
+            __(join(percent(used), " used")).__();
 
         fsqinfo.qinfo = info;
         if (info instanceof FairSchedulerLeafQueueInfo) {
-          li.ul("#lq").li()._(LeafQueueBlock.class)._()._();
+          li.ul("#lq").li().__(LeafQueueBlock.class).__().__();
         } else {
-          li.ul("#lq").li()._(ParentQueueBlock.class)._()._();
-          li._(QueueBlock.class);
+          li.ul("#lq").li().__(ParentQueueBlock.class).__().__();
+          li.__(QueueBlock.class);
         }
-        li._();
+        li.__();
       }
 
-      ul._();
+      ul.__();
     }
   }
   
@@ -177,19 +177,19 @@ public class FairSchedulerPage extends RmView {
 
     @Override
     public void render(Block html) {
-      html._(MetricsOverviewTable.class);
+      html.__(MetricsOverviewTable.class);
       UL<DIV<DIV<Hamlet>>> ul = html.
         div("#cs-wrapper.ui-widget").
           div(".ui-widget-header.ui-corner-top").
-            _("Application Queues")._().
+          __("Application Queues").__().
           div("#cs.ui-widget-content.ui-corner-bottom").
             ul();
       if (fs == null) {
         ul.
           li().
             a(_Q).$style(width(Q_MAX_WIDTH)).
-              span().$style(Q_END)._("100% ")._().
-              span(".q", "default")._()._();
+              span().$style(Q_END).__("100% ").__().
+              span(".q", "default").__().__();
       } else {
         FairSchedulerInfo sinfo = new FairSchedulerInfo(fs);
         fsqinfo.qinfo = sinfo.getRootQueueInfo();
@@ -197,52 +197,52 @@ public class FairSchedulerPage extends RmView {
 
         ul.
           li().$style("margin-bottom: 1em").
-            span().$style("font-weight: bold")._("Legend:")._().
+            span().$style("font-weight: bold").__("Legend:").__().
             span().$class("qlegend ui-corner-all").$style(Q_GIVEN).
               $title("The steady fair shares consider all queues, " +
                   "both active (with running applications) and inactive.").
-              _(STEADY_FAIR_SHARE)._().
+            __(STEADY_FAIR_SHARE).__().
             span().$class("qlegend ui-corner-all").$style(Q_INSTANTANEOUS_FS).
               $title("The instantaneous fair shares consider only active " +
                   "queues (with running applications).").
-              _(INSTANTANEOUS_FAIR_SHARE)._().
+            __(INSTANTANEOUS_FAIR_SHARE).__().
             span().$class("qlegend ui-corner-all").$style(Q_UNDER).
-              _("Used")._().
+            __("Used").__().
             span().$class("qlegend ui-corner-all").$style(Q_OVER).
-              _("Used (over fair share)")._().
+            __("Used (over fair share)").__().
             span().$class("qlegend ui-corner-all ui-state-default").
-              _("Max Capacity")._().
-        _().
+            __("Max Capacity").__().
+            __().
           li().
             a(_Q).$style(width(Q_MAX_WIDTH)).
               span().$style(join(width(used), ";left:0%;",
-                  used > 1 ? Q_OVER : Q_UNDER))._(".")._().
-              span(".q", "root")._().
+                  used > 1 ? Q_OVER : Q_UNDER)).__(".").__().
+              span(".q", "root").__().
             span().$class("qstats").$style(left(Q_STATS_POS)).
-              _(join(percent(used), " used"))._().
-            _(QueueBlock.class)._();
+            __(join(percent(used), " used")).__().
+            __(QueueBlock.class).__();
       }
-      ul._()._().
+      ul.__().__().
       script().$type("text/javascript").
-          _("$('#cs').hide();")._()._().
-      _(FairSchedulerAppsBlock.class);
+          __("$('#cs').hide();").__().__().
+          __(FairSchedulerAppsBlock.class);
     }
   }
   
-  @Override protected void postHead(Page.HTML<_> html) {
+  @Override protected void postHead(Page.HTML<__> html) {
     html.
       style().$type("text/css").
-        _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
+        __("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }",
           "#cs ul { list-style: none }",
           "#cs a { font-weight: normal; margin: 2px; position: relative }",
           "#cs a span { font-weight: normal; font-size: 80% }",
           "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }",
           ".qstats { font-weight: normal; font-size: 80%; position: absolute }",
           ".qlegend { font-weight: normal; padding: 0 1em; margin: 1em }",
-          "table.info tr th {width: 50%}")._(). // to center info table
+          "table.info tr th {width: 50%}").__(). // to center info table
       script("/static/jt/jquery.jstree.js").
       script().$type("text/javascript").
-        _("$(function() {",
+        __("$(function() {",
           "  $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');",
           "  $('#cs').bind('loaded.jstree', function (e, data) {",
           "    var callback = { call:reopenQueryNodes }",
@@ -262,8 +262,8 @@ public class FairSchedulerPage extends RmView {
           "    $('#apps').dataTable().fnFilter(q, 4, true);",
           "  });",
           "  $('#cs').show();",
-          "});")._().
-        _(SchedulerPageUtil.QueueBlockUtil.class);
+          "});").__().
+        __(SchedulerPageUtil.QueueBlockUtil.class);
   }
   
   @Override protected Class<? extends SubView> content() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
index fe7b247..f6b1a94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
@@ -24,8 +24,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsIn
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo;
 
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -61,19 +61,19 @@ public class MetricsOverviewTable extends HtmlBlock {
     table("#metricsoverview").
     thead().$class("ui-widget-header").
       tr().
-        th().$class("ui-state-default")._("Apps Submitted")._().
-        th().$class("ui-state-default")._("Apps Pending")._().
-        th().$class("ui-state-default")._("Apps Running")._().
-        th().$class("ui-state-default")._("Apps Completed")._().
-        th().$class("ui-state-default")._("Containers Running")._().
-        th().$class("ui-state-default")._("Memory Used")._().
-        th().$class("ui-state-default")._("Memory Total")._().
-        th().$class("ui-state-default")._("Memory Reserved")._().
-        th().$class("ui-state-default")._("VCores Used")._().
-        th().$class("ui-state-default")._("VCores Total")._().
-        th().$class("ui-state-default")._("VCores Reserved")._().
-      _().
-    _().
+        th().$class("ui-state-default").__("Apps Submitted").__().
+        th().$class("ui-state-default").__("Apps Pending").__().
+        th().$class("ui-state-default").__("Apps Running").__().
+        th().$class("ui-state-default").__("Apps Completed").__().
+        th().$class("ui-state-default").__("Containers Running").__().
+        th().$class("ui-state-default").__("Memory Used").__().
+        th().$class("ui-state-default").__("Memory Total").__().
+        th().$class("ui-state-default").__("Memory Reserved").__().
+        th().$class("ui-state-default").__("VCores Used").__().
+        th().$class("ui-state-default").__("VCores Total").__().
+        th().$class("ui-state-default").__("VCores Reserved").__().
+        __().
+        __().
     tbody().$class("ui-widget-content").
       tr().
         td(String.valueOf(clusterMetrics.getAppsSubmitted())).
@@ -92,33 +92,33 @@ public class MetricsOverviewTable extends HtmlBlock {
         td(String.valueOf(clusterMetrics.getAllocatedVirtualCores())).
         td(String.valueOf(clusterMetrics.getTotalVirtualCores())).
         td(String.valueOf(clusterMetrics.getReservedVirtualCores())).
-      _().
-    _()._();
+        __().
+        __().__();
 
     div.h3("Cluster Nodes Metrics").
     table("#nodemetricsoverview").
     thead().$class("ui-widget-header").
       tr().
-        th().$class("ui-state-default")._("Active Nodes")._().
-        th().$class("ui-state-default")._("Decommissioning Nodes")._().
-        th().$class("ui-state-default")._("Decommissioned Nodes")._().
-        th().$class("ui-state-default")._("Lost Nodes")._().
-        th().$class("ui-state-default")._("Unhealthy Nodes")._().
-        th().$class("ui-state-default")._("Rebooted Nodes")._().
-        th().$class("ui-state-default")._("Shutdown Nodes")._().
-      _().
-    _().
+        th().$class("ui-state-default").__("Active Nodes").__().
+        th().$class("ui-state-default").__("Decommissioning Nodes").__().
+        th().$class("ui-state-default").__("Decommissioned Nodes").__().
+        th().$class("ui-state-default").__("Lost Nodes").__().
+        th().$class("ui-state-default").__("Unhealthy Nodes").__().
+        th().$class("ui-state-default").__("Rebooted Nodes").__().
+        th().$class("ui-state-default").__("Shutdown Nodes").__().
+        __().
+        __().
     tbody().$class("ui-widget-content").
       tr().
-        td().a(url("nodes"),String.valueOf(clusterMetrics.getActiveNodes()))._().
-        td().a(url("nodes/decommissioning"), String.valueOf(clusterMetrics.getDecommissioningNodes()))._().
-        td().a(url("nodes/decommissioned"),String.valueOf(clusterMetrics.getDecommissionedNodes()))._().
-        td().a(url("nodes/lost"),String.valueOf(clusterMetrics.getLostNodes()))._().
-        td().a(url("nodes/unhealthy"),String.valueOf(clusterMetrics.getUnhealthyNodes()))._().
-        td().a(url("nodes/rebooted"),String.valueOf(clusterMetrics.getRebootedNodes()))._().
-        td().a(url("nodes/shutdown"),String.valueOf(clusterMetrics.getShutdownNodes()))._().
-      _().
-    _()._();
+        td().a(url("nodes"), String.valueOf(clusterMetrics.getActiveNodes())).__().
+        td().a(url("nodes/decommissioning"), String.valueOf(clusterMetrics.getDecommissioningNodes())).__().
+        td().a(url("nodes/decommissioned"), String.valueOf(clusterMetrics.getDecommissionedNodes())).__().
+        td().a(url("nodes/lost"), String.valueOf(clusterMetrics.getLostNodes())).__().
+        td().a(url("nodes/unhealthy"), String.valueOf(clusterMetrics.getUnhealthyNodes())).__().
+        td().a(url("nodes/rebooted"), String.valueOf(clusterMetrics.getRebootedNodes())).__().
+        td().a(url("nodes/shutdown"), String.valueOf(clusterMetrics.getShutdownNodes())).__().
+        __().
+        __().__();
 
     String user = request().getRemoteUser();
     if (user != null) {
@@ -128,21 +128,21 @@ public class MetricsOverviewTable extends HtmlBlock {
         table("#usermetricsoverview").
         thead().$class("ui-widget-header").
           tr().
-            th().$class("ui-state-default")._("Apps Submitted")._().
-            th().$class("ui-state-default")._("Apps Pending")._().
-            th().$class("ui-state-default")._("Apps Running")._().
-            th().$class("ui-state-default")._("Apps Completed")._().
-            th().$class("ui-state-default")._("Containers Running")._().
-            th().$class("ui-state-default")._("Containers Pending")._().
-            th().$class("ui-state-default")._("Containers Reserved")._().
-            th().$class("ui-state-default")._("Memory Used")._().
-            th().$class("ui-state-default")._("Memory Pending")._().
-            th().$class("ui-state-default")._("Memory Reserved")._().
-            th().$class("ui-state-default")._("VCores Used")._().
-            th().$class("ui-state-default")._("VCores Pending")._().
-            th().$class("ui-state-default")._("VCores Reserved")._().
-          _().
-        _().
+            th().$class("ui-state-default").__("Apps Submitted").__().
+            th().$class("ui-state-default").__("Apps Pending").__().
+            th().$class("ui-state-default").__("Apps Running").__().
+            th().$class("ui-state-default").__("Apps Completed").__().
+            th().$class("ui-state-default").__("Containers Running").__().
+            th().$class("ui-state-default").__("Containers Pending").__().
+            th().$class("ui-state-default").__("Containers Reserved").__().
+            th().$class("ui-state-default").__("Memory Used").__().
+            th().$class("ui-state-default").__("Memory Pending").__().
+            th().$class("ui-state-default").__("Memory Reserved").__().
+            th().$class("ui-state-default").__("VCores Used").__().
+            th().$class("ui-state-default").__("VCores Pending").__().
+            th().$class("ui-state-default").__("VCores Reserved").__().
+            __().
+            __().
         tbody().$class("ui-widget-content").
           tr().
             td(String.valueOf(userMetrics.getAppsSubmitted())).
@@ -163,8 +163,8 @@ public class MetricsOverviewTable extends HtmlBlock {
             td(String.valueOf(userMetrics.getAllocatedVirtualCores())).
             td(String.valueOf(userMetrics.getPendingVirtualCores())).
             td(String.valueOf(userMetrics.getReservedVirtualCores())).
-          _().
-        _()._();
+            __().
+            __().__();
         
       }
     }
@@ -175,14 +175,14 @@ public class MetricsOverviewTable extends HtmlBlock {
     table("#schedulermetricsoverview").
     thead().$class("ui-widget-header").
       tr().
-        th().$class("ui-state-default")._("Scheduler Type")._().
-        th().$class("ui-state-default")._("Scheduling Resource Type")._().
-        th().$class("ui-state-default")._("Minimum Allocation")._().
-        th().$class("ui-state-default")._("Maximum Allocation")._().
+        th().$class("ui-state-default").__("Scheduler Type").__().
+        th().$class("ui-state-default").__("Scheduling Resource Type").__().
+        th().$class("ui-state-default").__("Minimum Allocation").__().
+        th().$class("ui-state-default").__("Maximum Allocation").__().
         th().$class("ui-state-default")
-            ._("Maximum Cluster Application Priority")._().
-      _().
-    _().
+            .__("Maximum Cluster Application Priority").__().
+        __().
+        __().
     tbody().$class("ui-widget-content").
       tr().
         td(String.valueOf(schedulerInfo.getSchedulerType())).
@@ -190,9 +190,9 @@ public class MetricsOverviewTable extends HtmlBlock {
         td(schedulerInfo.getMinAllocation().toString()).
         td(schedulerInfo.getMaxAllocation().toString()).
         td(String.valueOf(schedulerInfo.getMaxClusterLevelAppPriority())).
-      _().
-    _()._();
+        __().
+        __().__();
 
-    div._();
+    div.__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
index ca55175..1993f6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -23,10 +23,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 public class NavBlock extends HtmlBlock {
@@ -45,29 +45,29 @@ public class NavBlock extends HtmlBlock {
       div("#nav").
         h3("Cluster").
         ul().
-          li().a(url("cluster"), "About")._().
-          li().a(url("nodes"), "Nodes")._().
-          li().a(url("nodelabels"), "Node Labels")._();
+          li().a(url("cluster"), "About").__().
+          li().a(url("nodes"), "Nodes").__().
+          li().a(url("nodelabels"), "Node Labels").__();
     UL<LI<UL<DIV<Hamlet>>>> subAppsList = mainList.
           li().a(url("apps"), "Applications").
             ul();
-    subAppsList.li()._();
+    subAppsList.li().__();
     for (YarnApplicationState state : YarnApplicationState.values()) {
       subAppsList.
-              li().a(url("apps", state.toString()), state.toString())._();
+              li().a(url("apps", state.toString()), state.toString()).__();
     }
-    subAppsList._()._();
+    subAppsList.__().__();
     UL<DIV<Hamlet>> tools = mainList.
-          li().a(url("scheduler"), "Scheduler")._()._().
+          li().a(url("scheduler"), "Scheduler").__().__().
         h3("Tools").ul();
-    tools.li().a("/conf", "Configuration")._().
-          li().a("/logs", "Local logs")._().
-          li().a("/stacks", "Server stacks")._().
-          li().a("/jmx?qry=Hadoop:*", "Server metrics")._();
+    tools.li().a("/conf", "Configuration").__().
+          li().a("/logs", "Local logs").__().
+          li().a("/stacks", "Server stacks").__().
+          li().a("/jmx?qry=Hadoop:*", "Server metrics").__();
 
     if (addErrorsAndWarningsLink) {
-      tools.li().a(url("errors-and-warnings"), "Errors/Warnings")._();
+      tools.li().a(url("errors-and-warnings"), "Errors/Warnings").__();
     }
-    tools._()._();
+    tools.__().__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
index ea85d13..6ff7628 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
@@ -26,10 +26,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -53,7 +53,7 @@ public class NodeLabelsPage extends RmView {
           th(".type", "Label Type").
           th(".numOfActiveNMs", "Num Of Active NMs").
           th(".totalResource", "Total Resource").
-          _()._().
+          __().__().
           tbody();
   
       RMNodeLabelsManager nlm = rm.getRMContext().getNodeLabelManager();
@@ -71,17 +71,17 @@ public class NodeLabelsPage extends RmView {
           .a(url("nodes",
               "?" + YarnWebParams.NODE_LABEL + "=" + info.getLabelName()),
               String.valueOf(nActiveNMs))
-           ._();
+           .__();
         } else {
           row = row.td(String.valueOf(nActiveNMs));
         }
-        row.td(info.getResource().toString())._();
+        row.td(info.getResource().toString()).__();
       }
-      tbody._()._();
+      tbody.__().__();
     }
   }
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     String title = "Node labels of the cluster";
     setTitle(title);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index c03df63..d0e384d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -30,9 +30,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import java.util.Collection;
@@ -63,7 +63,7 @@ class NodesPage extends RmView {
 
     @Override
     protected void render(Block html) {
-      html._(MetricsOverviewTable.class);
+      html.__(MetricsOverviewTable.class);
 
       ResourceScheduler sched = rm.getResourceScheduler();
 
@@ -98,7 +98,7 @@ class NodesPage extends RmView {
       }
 
       TBODY<TABLE<Hamlet>> tbody =
-          trbody.th(".nodeManagerVersion", "Version")._()._().tbody();
+          trbody.th(".nodeManagerVersion", "Version").__().__().tbody();
 
       NodeState stateFilter = null;
       if (type != null && !type.isEmpty()) {
@@ -201,13 +201,13 @@ class NodesPage extends RmView {
       }
       nodeTableData.append("]");
       html.script().$type("text/javascript")
-          ._("var nodeTableData=" + nodeTableData)._();
-      tbody._()._();
+          .__("var nodeTableData=" + nodeTableData).__();
+      tbody.__().__();
     }
   }
 
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     commonPreHead(html);
     String type = $(NODE_STATE);
     String title = "Nodes of the cluster";


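The change above is mechanical throughout: every webapp block moves from org.apache.hadoop.yarn.webapp.hamlet to org.apache.hadoop.yarn.webapp.hamlet2, and the element-closing/text-append method _() becomes __() (likewise the Page.HTML<_> marker type becomes Page.HTML<__>), because a bare underscore is deprecated as an identifier since Java 8 and reserved outright in Java 9. A minimal before/after sketch, assuming a trivial HtmlBlock (the class name and CSS selector here are illustrative, not taken from the patch):

    import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

    public class ExampleBlock extends HtmlBlock {
      @Override
      protected void render(Block html) {
        // hamlet (old):  html.div("#d").h3("Example")._("body text")._();
        // hamlet2 (new): identical fluent structure; only _ -> __ changes,
        // for both the text-append call and the element-closing call.
        html.div("#d").h3("Example").__("body text").__();
      }
    }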


[30/50] [abbrv] hadoop git commit: HDFS-12062. removeErasureCodingPolicy needs super user permission. Contributed by Wei-Chiu Chuang.

Posted by xg...@apache.org.
HDFS-12062. removeErasureCodingPolicy needs super user permission. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/369f7312
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/369f7312
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/369f7312

Branch: refs/heads/YARN-5734
Commit: 369f731264d77617452e4074d15404bd62ec6093
Parents: 9902be7
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Jul 28 00:50:08 2017 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Jul 28 00:51:03 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 26 ++++++++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  1 +
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 50 ++++++++++++++++++++
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 20 ++++++++
 4 files changed, 93 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/369f7312/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8acda61..677ea35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2774,25 +2774,43 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public AddECPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     checkOpen();
-    return namenode.addErasureCodingPolicies(policies);
+    try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
+      return namenode.addErasureCodingPolicies(policies);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class);
+    }
   }
 
   public void removeErasureCodingPolicy(String ecPolicyName)
       throws IOException {
     checkOpen();
-    namenode.removeErasureCodingPolicy(ecPolicyName);
+    try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
+      namenode.removeErasureCodingPolicy(ecPolicyName);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class);
+    }
   }
 
   public void enableErasureCodingPolicy(String ecPolicyName)
       throws IOException {
     checkOpen();
-    namenode.enableErasureCodingPolicy(ecPolicyName);
+    try (TraceScope ignored = tracer.newScope("enableErasureCodingPolicy")) {
+      namenode.enableErasureCodingPolicy(ecPolicyName);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          SafeModeException.class);
+    }
   }
 
   public void disableErasureCodingPolicy(String ecPolicyName)
       throws IOException {
     checkOpen();
-    namenode.disableErasureCodingPolicy(ecPolicyName);
+    try (TraceScope ignored = tracer.newScope("disableErasureCodingPolicy")) {
+      namenode.disableErasureCodingPolicy(ecPolicyName);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          SafeModeException.class);
+    }
   }
 
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/369f7312/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 39d93df..9cd58cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -2304,6 +2304,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   public void removeErasureCodingPolicy(String ecPolicyName)
       throws IOException {
     checkNNStartup();
+    namesystem.checkSuperuserPrivilege();
     namesystem.removeErasureCodingPolicy(ecPolicyName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/369f7312/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index b35d374..9525609 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ScriptBasedMapping;
 import org.apache.hadoop.net.StaticMapping;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
@@ -1561,6 +1562,27 @@ public class TestDistributedFileSystem {
       fs.removeErasureCodingPolicy(policyName);
       assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
           getRemovedPolicies().get(0).getName());
+
+      // remove erasure coding policy as a user without privilege
+      UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+          "ProbablyNotARealUserName", new String[] {"ShangriLa"});
+      final MiniDFSCluster finalCluster = cluster;
+      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          DistributedFileSystem fs = finalCluster.getFileSystem();
+          try {
+            fs.removeErasureCodingPolicy(policyName);
+            fail();
+          } catch (AccessControlException ace) {
+            GenericTestUtils.assertExceptionContains("Access denied for user " +
+                "ProbablyNotARealUserName. Superuser privilege is required",
+                ace);
+          }
+          return null;
+        }
+      });
+
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -1609,6 +1631,34 @@ public class TestDistributedFileSystem {
         GenericTestUtils.assertExceptionContains("does not exists", e);
         // pass
       }
+
+      // disable and enable erasure coding policy as a user without privilege
+      UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+          "ProbablyNotARealUserName", new String[] {"ShangriLa"});
+      final MiniDFSCluster finalCluster = cluster;
+      fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          DistributedFileSystem fs = finalCluster.getFileSystem();
+          try {
+            fs.disableErasureCodingPolicy(policyName);
+            fail();
+          } catch (AccessControlException ace) {
+            GenericTestUtils.assertExceptionContains("Access denied for user " +
+                    "ProbablyNotARealUserName. Superuser privilege is required",
+                ace);
+          }
+          try {
+            fs.enableErasureCodingPolicy(policyName);
+            fail();
+          } catch (AccessControlException ace) {
+            GenericTestUtils.assertExceptionContains("Access denied for user " +
+                    "ProbablyNotARealUserName. Superuser privilege is required",
+                ace);
+          }
+          return null;
+        }
+      });
     } finally {
       if (cluster != null) {
         cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/369f7312/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index f90a2f3..127dad1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -693,5 +693,25 @@ public class TestErasureCodingPolicies {
     assertTrue(responses[0].isSucceed());
     assertEquals(SystemErasureCodingPolicies.getPolicies().size() + 1,
         ErasureCodingPolicyManager.getInstance().getPolicies().length);
+
+    // add erasure coding policy as a user without privilege
+    UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+        "ProbablyNotARealUserName", new String[] {"ShangriLa"});
+    final ErasureCodingPolicy ecPolicy = newPolicy;
+    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        DistributedFileSystem fs = cluster.getFileSystem();
+        try {
+          fs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy});
+          fail();
+        } catch (AccessControlException ace) {
+          GenericTestUtils.assertExceptionContains("Access denied for user " +
+                  "ProbablyNotARealUserName. Superuser privilege is required",
+              ace);
+        }
+        return null;
+      }
+    });
   }
 }
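
Taken together, the server-side checkSuperuserPrivilege() call and the client-side unwrapping of RemoteException mean a non-superuser now sees a plain AccessControlException. A minimal client-side sketch of the new behavior (assuming a running HDFS cluster reachable via the default Configuration; the policy name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.security.AccessControlException;

    public class RemoveEcPolicyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        try {
          // NameNodeRpcServer now calls namesystem.checkSuperuserPrivilege()
          // before removing, so this fails for ordinary users.
          dfs.removeErasureCodingPolicy("myUserPolicy"); // hypothetical policy name
        } catch (AccessControlException ace) {
          // Unwrapped from RemoteException by DFSClient#removeErasureCodingPolicy:
          // "Access denied for user ... Superuser privilege is required"
          System.err.println(ace.getMessage());
        }
      }
    }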




[03/50] [abbrv] hadoop git commit: HADOOP-14597. Native compilation broken with OpenSSL-1.1.0. Contributed by Ravi Prakash.

Posted by xg...@apache.org.
HADOOP-14597. Native compilation broken with OpenSSL-1.1.0. Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94ca52ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94ca52ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94ca52ae

Branch: refs/heads/YARN-5734
Commit: 94ca52ae9ec0ae04854d726bf2ac1bc457b96a9c
Parents: 1058362
Author: Ravi Prakash <ra...@apache.org>
Authored: Mon Jul 24 16:01:45 2017 -0700
Committer: Ravi Prakash <ra...@apache.org>
Committed: Mon Jul 24 16:01:45 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/crypto/OpensslCipher.c    | 46 ++++++++++++++++++--
 .../src/main/native/pipes/impl/HadoopPipes.cc   | 12 ++++-
 2 files changed, 53 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94ca52ae/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
index 5cb5bba..c7984a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
@@ -30,6 +30,11 @@ static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
 static int (*dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
 static void (*dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
 static int (*dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
+static int (*dlsym_EVP_CIPHER_CTX_test_flags)(const EVP_CIPHER_CTX *, int);
+static int (*dlsym_EVP_CIPHER_CTX_block_size)(const EVP_CIPHER_CTX *);
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+static int (*dlsym_EVP_CIPHER_CTX_encrypting)(const EVP_CIPHER_CTX *);
+#endif
 static int (*dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *, const EVP_CIPHER *,  \
            ENGINE *, const unsigned char *, const unsigned char *, int);
 static int (*dlsym_EVP_CipherUpdate)(EVP_CIPHER_CTX *, unsigned char *,  \
@@ -46,6 +51,11 @@ typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
 typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
 typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
 typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
+typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_test_flags)(const EVP_CIPHER_CTX *, int);
+typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_block_size)(const EVP_CIPHER_CTX *);
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_encrypting)(const EVP_CIPHER_CTX *);
+#endif
 typedef int (__cdecl *__dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *,  \
              const EVP_CIPHER *, ENGINE *, const unsigned char *,  \
              const unsigned char *, int);
@@ -60,6 +70,11 @@ static __dlsym_EVP_CIPHER_CTX_free dlsym_EVP_CIPHER_CTX_free;
 static __dlsym_EVP_CIPHER_CTX_cleanup dlsym_EVP_CIPHER_CTX_cleanup;
 static __dlsym_EVP_CIPHER_CTX_init dlsym_EVP_CIPHER_CTX_init;
 static __dlsym_EVP_CIPHER_CTX_set_padding dlsym_EVP_CIPHER_CTX_set_padding;
+static __dlsym_EVP_CIPHER_CTX_test_flags dlsym_EVP_CIPHER_CTX_test_flags;
+static __dlsym_EVP_CIPHER_CTX_block_size dlsym_EVP_CIPHER_CTX_block_size;
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+static __dlsym_EVP_CIPHER_CTX_encrypting dlsym_EVP_CIPHER_CTX_encrypting;
+#endif
 static __dlsym_EVP_CipherInit_ex dlsym_EVP_CipherInit_ex;
 static __dlsym_EVP_CipherUpdate dlsym_EVP_CipherUpdate;
 static __dlsym_EVP_CipherFinal_ex dlsym_EVP_CipherFinal_ex;
@@ -114,6 +129,14 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs
                       "EVP_CIPHER_CTX_init");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_set_padding, env, openssl,  \
                       "EVP_CIPHER_CTX_set_padding");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_test_flags, env, openssl,  \
+                      "EVP_CIPHER_CTX_test_flags");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_block_size, env, openssl,  \
+                      "EVP_CIPHER_CTX_block_size");
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_encrypting, env, openssl,  \
+                      "EVP_CIPHER_CTX_encrypting");
+#endif
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherInit_ex, env, openssl,  \
                       "EVP_CipherInit_ex");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherUpdate, env, openssl,  \
@@ -135,6 +158,17 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs
   LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_set_padding,  \
                       dlsym_EVP_CIPHER_CTX_set_padding, env,  \
                       openssl, "EVP_CIPHER_CTX_set_padding");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_test_flags,  \
+                      dlsym_EVP_CIPHER_CTX_test_flags, env,  \
+                      openssl, "EVP_CIPHER_CTX_test_flags");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_block_size,  \
+                      dlsym_EVP_CIPHER_CTX_block_size, env,  \
+                      openssl, "EVP_CIPHER_CTX_block_size");
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_encrypting,  \
+                      dlsym_EVP_CIPHER_CTX_encrypting, env,  \
+                      openssl, "EVP_CIPHER_CTX_encrypting");
+#endif
   LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherInit_ex, dlsym_EVP_CipherInit_ex,  \
                       env, openssl, "EVP_CipherInit_ex");
   LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherUpdate, dlsym_EVP_CipherUpdate,  \
@@ -253,14 +287,18 @@ JNIEXPORT jlong JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_init
 static int check_update_max_output_len(EVP_CIPHER_CTX *context, int input_len, 
     int max_output_len)
 {
-  if (context->flags & EVP_CIPH_NO_PADDING) {
+  if (dlsym_EVP_CIPHER_CTX_test_flags(context, EVP_CIPH_NO_PADDING)) {
     if (max_output_len >= input_len) {
       return 1;
     }
     return 0;
   } else {
-    int b = context->cipher->block_size;
+    int b = dlsym_EVP_CIPHER_CTX_block_size(context);
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
     if (context->encrypt) {
+#else
+    if (dlsym_EVP_CIPHER_CTX_encrypting(context)) {
+#endif
       if (max_output_len >= input_len + b - 1) {
         return 1;
       }
@@ -307,10 +345,10 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_update
 static int check_doFinal_max_output_len(EVP_CIPHER_CTX *context, 
     int max_output_len)
 {
-  if (context->flags & EVP_CIPH_NO_PADDING) {
+  if (dlsym_EVP_CIPHER_CTX_test_flags(context, EVP_CIPH_NO_PADDING)) {
     return 1;
   } else {
-    int b = context->cipher->block_size;
+    int b = dlsym_EVP_CIPHER_CTX_block_size(context);
     if (max_output_len >= b) {
       return 1;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94ca52ae/hadoop-tools/hadoop-pipes/src/main/native/pipes/impl/HadoopPipes.cc
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-pipes/src/main/native/pipes/impl/HadoopPipes.cc b/hadoop-tools/hadoop-pipes/src/main/native/pipes/impl/HadoopPipes.cc
index 91fb5a4..45cb8c2 100644
--- a/hadoop-tools/hadoop-pipes/src/main/native/pipes/impl/HadoopPipes.cc
+++ b/hadoop-tools/hadoop-pipes/src/main/native/pipes/impl/HadoopPipes.cc
@@ -420,6 +420,7 @@ namespace HadoopPipes {
     }
 
     string createDigest(string &password, string& msg) {
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
       HMAC_CTX ctx;
       unsigned char digest[EVP_MAX_MD_SIZE];
       HMAC_Init(&ctx, (const unsigned char *)password.c_str(), 
@@ -428,7 +429,16 @@ namespace HadoopPipes {
       unsigned int digestLen;
       HMAC_Final(&ctx, digest, &digestLen);
       HMAC_cleanup(&ctx);
-
+#else
+      HMAC_CTX *ctx = HMAC_CTX_new();
+      unsigned char digest[EVP_MAX_MD_SIZE];
+      HMAC_Init_ex(ctx, (const unsigned char *)password.c_str(),
+          password.length(), EVP_sha1(), NULL);
+      HMAC_Update(ctx, (const unsigned char *)msg.c_str(), msg.length());
+      unsigned int digestLen;
+      HMAC_Final(ctx, digest, &digestLen);
+      HMAC_CTX_free(ctx);
+#endif
       //now apply base64 encoding
       BIO *bmem, *b64;
       BUF_MEM *bptr;




[09/50] [abbrv] hadoop git commit: HADOOP-14455. ViewFileSystem#rename should be supported within the same nameservice with different mountpoints. Contributed by Brahma Reddy Battula.

Posted by xg...@apache.org.
HADOOP-14455. ViewFileSystem#rename should be supported within the same nameservice with different mountpoints. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d983cca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d983cca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d983cca

Branch: refs/heads/YARN-5734
Commit: 6d983cca52f113118bf49fec527ffb3eb869290a
Parents: 1a79dcf
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jul 25 23:20:35 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Jul 25 23:51:53 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/viewfs/Constants.java  |  2 +
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java | 79 +++++++++++-----
 .../org/apache/hadoop/fs/viewfs/ViewFs.java     | 43 ++++-----
 .../src/main/resources/core-default.xml         |  9 ++
 .../conf/TestCommonConfigurationFields.java     |  1 +
 .../hadoop/fs/contract/ContractTestUtils.java   | 54 +++++++++++
 .../fs/viewfs/ViewFileSystemBaseTest.java       | 79 +++++++++++++---
 .../apache/hadoop/fs/viewfs/ViewFsBaseTest.java | 94 ++++++++++++++++----
 .../fs/viewfs/TestViewFileSystemHdfs.java       | 22 +++++
 9 files changed, 309 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index ec8ab2b..9882a8e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -66,4 +66,6 @@ public interface Constants {
 
   static public final FsPermission PERMISSION_555 =
       new FsPermission((short) 0555);
+
+  String CONFIG_VIEWFS_RENAME_STRATEGY = "fs.viewfs.rename.strategy";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 8265d89..158b099 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -126,7 +126,8 @@ public class ViewFileSystem extends FileSystem {
   Configuration config;
   InodeTree<FileSystem> fsState;  // the fs state; ie the mount table
   Path homeDir = null;
-  
+  // Default to rename within same mountpoint
+  private RenameStrategy renameStrategy = RenameStrategy.SAME_MOUNTPOINT;
   /**
    * Make the path Absolute and get the path-part of a pathname.
    * Checks that URI matches this file system 
@@ -207,6 +208,9 @@ public class ViewFileSystem extends FileSystem {
         }
       };
       workingDir = this.getHomeDirectory();
+      renameStrategy = RenameStrategy.valueOf(
+          conf.get(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+              RenameStrategy.SAME_MOUNTPOINT.toString()));
     } catch (URISyntaxException e) {
       throw new IOException("URISyntax exception: " + theUri);
     }
@@ -490,27 +494,55 @@ public class ViewFileSystem extends FileSystem {
     if (resDst.isInternalDir()) {
           throw readOnlyMountTable("rename", dst);
     }
-    /**
-    // Alternate 1: renames within same file system - valid but we disallow
-    // Alternate 2: (as described in next para - valid but we have disallowed it
-    //
-    // Note we compare the URIs. the URIs include the link targets. 
-    // hence we allow renames across mount links as long as the mount links
-    // point to the same target.
-    if (!resSrc.targetFileSystem.getUri().equals(
-              resDst.targetFileSystem.getUri())) {
-      throw new IOException("Renames across Mount points not supported");
-    }
-    */
-    
-    //
-    // Alternate 3 : renames ONLY within the the same mount links.
-    //
-    if (resSrc.targetFileSystem !=resDst.targetFileSystem) {
-      throw new IOException("Renames across Mount points not supported");
+
+    URI srcUri = resSrc.targetFileSystem.getUri();
+    URI dstUri = resDst.targetFileSystem.getUri();
+
+    verifyRenameStrategy(srcUri, dstUri,
+        resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
+
+    ChRootedFileSystem srcFS = (ChRootedFileSystem) resSrc.targetFileSystem;
+    ChRootedFileSystem dstFS = (ChRootedFileSystem) resDst.targetFileSystem;
+    return srcFS.getMyFs().rename(srcFS.fullPath(resSrc.remainingPath),
+        dstFS.fullPath(resDst.remainingPath));
+  }
+
+  static void verifyRenameStrategy(URI srcUri, URI dstUri,
+      boolean isSrcDestSame, ViewFileSystem.RenameStrategy renameStrategy)
+      throws IOException {
+    switch (renameStrategy) {
+    case SAME_FILESYSTEM_ACROSS_MOUNTPOINT:
+      if (srcUri.getAuthority() != null) {
+        if (!(srcUri.getScheme().equals(dstUri.getScheme()) && srcUri
+            .getAuthority().equals(dstUri.getAuthority()))) {
+          throw new IOException("Renames across Mount points not supported");
+        }
+      }
+
+      break;
+    case SAME_TARGET_URI_ACROSS_MOUNTPOINT:
+      // Alternate 2: Rename across mountpoints with same target.
+      // i.e. Rename across alias mountpoints.
+      //
+      // Note we compare the URIs. the URIs include the link targets.
+      // hence we allow renames across mount links as long as the mount links
+      // point to the same target.
+      if (!srcUri.equals(dstUri)) {
+        throw new IOException("Renames across Mount points not supported");
+      }
+
+      break;
+    case SAME_MOUNTPOINT:
+      //
+      // Alternate 3: renames ONLY within the same mount links.
+      //
+      if (!isSrcDestSame) {
+        throw new IOException("Renames across Mount points not supported");
+      }
+      break;
+    default:
+      throw new IllegalArgumentException ("Unexpected rename strategy");
     }
-    return resSrc.targetFileSystem.rename(resSrc.remainingPath,
-        resDst.remainingPath);
   }
 
   @Override
@@ -1241,4 +1273,9 @@ public class ViewFileSystem extends FileSystem {
       return allPolicies;
     }
   }
+
+  enum RenameStrategy {
+    SAME_MOUNTPOINT, SAME_TARGET_URI_ACROSS_MOUNTPOINT,
+    SAME_FILESYSTEM_ACROSS_MOUNTPOINT
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 3a34a91..364485f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -157,7 +157,9 @@ public class ViewFs extends AbstractFileSystem {
   final Configuration config;
   InodeTree<AbstractFileSystem> fsState;  // the fs state; ie the mount table
   Path homeDir = null;
-  
+  private ViewFileSystem.RenameStrategy renameStrategy =
+      ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT;
+
   static AccessControlException readOnlyMountTable(final String operation,
       final String p) {
     return new AccessControlException( 
@@ -237,6 +239,9 @@ public class ViewFs extends AbstractFileSystem {
         // return MergeFs.createMergeFs(mergeFsURIList, config);
       }
     };
+    renameStrategy = ViewFileSystem.RenameStrategy.valueOf(
+        conf.get(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+            ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT.toString()));
   }
 
   @Override
@@ -495,37 +500,23 @@ public class ViewFs extends AbstractFileSystem {
               + " is readOnly");
     }
 
-    InodeTree.ResolveResult<AbstractFileSystem> resDst = 
+    InodeTree.ResolveResult<AbstractFileSystem> resDst =
                                 fsState.resolve(getUriPath(dst), false);
     if (resDst.isInternalDir()) {
       throw new AccessControlException(
           "Cannot Rename within internal dirs of mount table: dest=" + dst
               + " is readOnly");
     }
-    
-    /**
-    // Alternate 1: renames within same file system - valid but we disallow
-    // Alternate 2: (as described in next para - valid but we have disallowed it
-    //
-    // Note we compare the URIs. the URIs include the link targets. 
-    // hence we allow renames across mount links as long as the mount links
-    // point to the same target.
-    if (!resSrc.targetFileSystem.getUri().equals(
-              resDst.targetFileSystem.getUri())) {
-      throw new IOException("Renames across Mount points not supported");
-    }
-    */
-    
-    //
-    // Alternate 3 : renames ONLY within the the same mount links.
-    //
-
-    if (resSrc.targetFileSystem !=resDst.targetFileSystem) {
-      throw new IOException("Renames across Mount points not supported");
-    }
-    
-    resSrc.targetFileSystem.renameInternal(resSrc.remainingPath,
-      resDst.remainingPath, overwrite);
+    //Alternate 1: renames within same file system
+    URI srcUri = resSrc.targetFileSystem.getUri();
+    URI dstUri = resDst.targetFileSystem.getUri();
+    ViewFileSystem.verifyRenameStrategy(srcUri, dstUri,
+        resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
+
+    ChRootedFs srcFS = (ChRootedFs) resSrc.targetFileSystem;
+    ChRootedFs dstFS = (ChRootedFs) resDst.targetFileSystem;
+    srcFS.getMyFs().renameInternal(srcFS.fullPath(resSrc.remainingPath),
+        dstFS.fullPath(resDst.remainingPath), overwrite);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index d5ddc7f..593fd85 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -801,6 +801,15 @@
 </property>
 
 <property>
+  <name>fs.viewfs.rename.strategy</name>
+  <value>SAME_MOUNTPOINT</value>
+  <description>Allowed rename strategy to rename between multiple mountpoints.
+    Allowed values are SAME_MOUNTPOINT, SAME_TARGET_URI_ACROSS_MOUNTPOINT and
+    SAME_FILESYSTEM_ACROSS_MOUNTPOINT.
+  </description>
+</property>
+
+<property>
   <name>fs.AbstractFileSystem.ftp.impl</name>
   <value>org.apache.hadoop.fs.ftp.FtpFs</value>
   <description>The FileSystem for Ftp: uris.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index ef74cba..da37e68 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -95,6 +95,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     xmlPropsToSkipCompare.add("nfs3.mountd.port");
     xmlPropsToSkipCompare.add("nfs3.server.port");
     xmlPropsToSkipCompare.add("test.fs.s3n.name");
+    xmlPropsToSkipCompare.add("fs.viewfs.rename.strategy");
 
     // S3N/S3A properties are in a different subtree.
     // - org.apache.hadoop.fs.s3native.S3NativeFileSystemConfigKeys

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 39c6d18..e60fd43 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.contract;
 
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
@@ -718,6 +719,21 @@ public class ContractTestUtils extends Assert {
   /**
    * Assert that a file exists and whose {@link FileStatus} entry
    * declares that this is a file and not a symlink or directory.
+   *
+   * @param fileContext filesystem to resolve path against
+   * @param filename    name of the file
+   * @throws IOException IO problems during file operations
+   */
+  public static void assertIsFile(FileContext fileContext, Path filename)
+      throws IOException {
+    assertPathExists(fileContext, "Expected file", filename);
+    FileStatus status = fileContext.getFileStatus(filename);
+    assertIsFile(filename, status);
+  }
+
+  /**
+   * Assert that a file exists and whose {@link FileStatus} entry
+   * declares that this is a file and not a symlink or directory.
    * @param filename name of the file
    * @param status file status
    */
@@ -766,6 +782,25 @@ public class ContractTestUtils extends Assert {
   }
 
   /**
+   * Assert that a path exists -but make no assertions as to the
+   * type of that entry.
+   *
+   * @param fileContext fileContext to examine
+   * @param message     message to include in the assertion failure message
+   * @param path        path in the filesystem
+   * @throws FileNotFoundException raised if the path is missing
+   * @throws IOException           IO problems
+   */
+  public static void assertPathExists(FileContext fileContext, String message,
+      Path path) throws IOException {
+    if (!fileContext.util().exists(path)) {
+      //failure, report it
+      throw new FileNotFoundException(
+          message + ": not found " + path + " in " + path.getParent());
+    }
+  }
+
+  /**
    * Assert that a path does not exist.
    *
    * @param fileSystem filesystem to examine
@@ -786,6 +821,25 @@ public class ContractTestUtils extends Assert {
   }
 
   /**
+   * Assert that a path does not exist.
+   *
+   * @param fileContext fileContext to examine
+   * @param message     message to include in the assertion failure message
+   * @param path        path in the filesystem
+   * @throws IOException IO problems
+   */
+  public static void assertPathDoesNotExist(FileContext fileContext,
+      String message, Path path) throws IOException {
+    try {
+      FileStatus status = fileContext.getFileStatus(path);
+      fail(message + ": unexpectedly found " + path + " as  " + status);
+    } catch (FileNotFoundException expected) {
+      //this is expected
+
+    }
+  }
+
+  /**
    * Assert that a FileSystem.listStatus on a dir finds the subdir/child entry.
    * @param fs filesystem
    * @param dir directory to scan

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 68a7560..db2d2d7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
@@ -51,6 +52,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assume;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -366,28 +368,83 @@ abstract public class ViewFileSystemBaseTest {
   }
   
   // rename across mount points that point to same target also fail 
-  @Test(expected=IOException.class) 
+  @Test
   public void testRenameAcrossMounts1() throws IOException {
     fileSystemTestHelper.createFile(fsView, "/user/foo");
-    fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
-    /* - code if we had wanted this to succeed
-    Assert.assertFalse(fSys.exists(new Path("/user/foo")));
-    Assert.assertFalse(fSysLocal.exists(new Path(targetTestRoot,"user/foo")));
-    Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/user2/fooBarBar")));
-    Assert.assertTrue(fSysLocal.isFile(new Path(targetTestRoot,"user/fooBarBar")));
-    */
+    try {
+      fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
+      ContractTestUtils.fail("IOException is not thrown on rename operation");
+    } catch (IOException e) {
+      GenericTestUtils
+          .assertExceptionContains("Renames across Mount points not supported",
+              e);
+    }
   }
   
   
   // rename across mount points fail if the mount link targets are different
   // even if the targets are part of the same target FS
 
-  @Test(expected=IOException.class) 
+  @Test
   public void testRenameAcrossMounts2() throws IOException {
     fileSystemTestHelper.createFile(fsView, "/user/foo");
-    fsView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
+    try {
+      fsView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
+      ContractTestUtils.fail("IOException is not thrown on rename operation");
+    } catch (IOException e) {
+      GenericTestUtils
+          .assertExceptionContains("Renames across Mount points not supported",
+              e);
+    }
   }
-  
+
+  // RenameStrategy SAME_TARGET_URI_ACROSS_MOUNTPOINT enabled
+  // to rename across mount points that point to same target URI
+  @Test
+  public void testRenameAcrossMounts3() throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+        ViewFileSystem.RenameStrategy.SAME_TARGET_URI_ACROSS_MOUNTPOINT
+            .toString());
+    FileSystem fsView2 = FileSystem.newInstance(FsConstants.VIEWFS_URI, conf2);
+    fileSystemTestHelper.createFile(fsView2, "/user/foo");
+    fsView2.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
+    ContractTestUtils
+        .assertPathDoesNotExist(fsView2, "src should not exist after rename",
+            new Path("/user/foo"));
+    ContractTestUtils
+        .assertPathDoesNotExist(fsTarget, "src should not exist after rename",
+            new Path(targetTestRoot, "user/foo"));
+    ContractTestUtils.assertIsFile(fsView2,
+        fileSystemTestHelper.getTestRootPath(fsView2, "/user2/fooBarBar"));
+    ContractTestUtils
+        .assertIsFile(fsTarget, new Path(targetTestRoot, "user/fooBarBar"));
+  }
+
+  // RenameStrategy SAME_FILESYSTEM_ACROSS_MOUNTPOINT enabled
+  // to rename across mount points where the mount link targets are different
+  // but are part of the same target FS
+  @Test
+  public void testRenameAcrossMounts4() throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+        ViewFileSystem.RenameStrategy.SAME_FILESYSTEM_ACROSS_MOUNTPOINT
+            .toString());
+    FileSystem fsView2 = FileSystem.newInstance(FsConstants.VIEWFS_URI, conf2);
+    fileSystemTestHelper.createFile(fsView2, "/user/foo");
+    fsView2.rename(new Path("/user/foo"), new Path("/data/fooBar"));
+    ContractTestUtils
+        .assertPathDoesNotExist(fsView2, "src should not exist after rename",
+            new Path("/user/foo"));
+    ContractTestUtils
+        .assertPathDoesNotExist(fsTarget, "src should not exist after rename",
+            new Path(targetTestRoot, "user/foo"));
+    ContractTestUtils.assertIsFile(fsView2,
+        fileSystemTestHelper.getTestRootPath(fsView2, "/data/fooBar"));
+    ContractTestUtils
+        .assertIsFile(fsTarget, new Path(targetTestRoot, "data/fooBar"));
+  }
+
   static protected boolean SupportsBlocks = false; //  local fs use 1 block
                                                    // override for HDFS
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index fdc6389..d72ab74 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.local.LocalConfigKeys;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -66,6 +67,7 @@ import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -345,33 +347,93 @@ abstract public class ViewFsBaseTest {
   }
   
   // rename across mount points that point to same target also fail 
-  @Test(expected=IOException.class) 
+  @Test
   public void testRenameAcrossMounts1() throws IOException {
     fileContextTestHelper.createFile(fcView, "/user/foo");
-    fcView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
-    /* - code if we had wanted this to succeed
-    Assert.assertFalse(exists(fc, new Path("/user/foo")));
-    Assert.assertFalse(exists(fclocal, new Path(targetTestRoot,"user/foo")));
-    Assert.assertTrue(isFile(fc,
-       FileContextTestHelper.getTestRootPath(fc,"/user2/fooBarBar")));
-    Assert.assertTrue(isFile(fclocal,
-        new Path(targetTestRoot,"user/fooBarBar")));
-    */
+    try {
+      fcView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
+      ContractTestUtils.fail("IOException is not thrown on rename operation");
+    } catch (IOException e) {
+      GenericTestUtils
+          .assertExceptionContains("Renames across Mount points not supported",
+              e);
+    }
   }
   
   
   // rename across mount points fail if the mount link targets are different
   // even if the targets are part of the same target FS
 
-  @Test(expected=IOException.class) 
+  @Test
   public void testRenameAcrossMounts2() throws IOException {
     fileContextTestHelper.createFile(fcView, "/user/foo");
-    fcView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
+    try {
+      fcView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
+      ContractTestUtils.fail("IOException is not thrown on rename operation");
+    } catch (IOException e) {
+      GenericTestUtils
+          .assertExceptionContains("Renames across Mount points not supported",
+              e);
+    }
   }
-  
-  
-  
-  
+
+  // RenameStrategy SAME_TARGET_URI_ACROSS_MOUNTPOINT enabled
+  // to rename across mount points that point to same target URI
+  @Test
+  public void testRenameAcrossMounts3() throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+        ViewFileSystem.RenameStrategy.SAME_TARGET_URI_ACROSS_MOUNTPOINT
+            .toString());
+
+    FileContext fcView2 =
+        FileContext.getFileContext(FsConstants.VIEWFS_URI, conf2);
+    String user1Path = "/user/foo";
+    fileContextTestHelper.createFile(fcView2, user1Path);
+    String user2Path = "/user2/fooBarBar";
+    Path user2Dst = new Path(user2Path);
+    fcView2.rename(new Path(user1Path), user2Dst);
+    ContractTestUtils
+        .assertPathDoesNotExist(fcView2, "src should not exist after rename",
+            new Path(user1Path));
+    ContractTestUtils
+        .assertPathDoesNotExist(fcTarget, "src should not exist after rename",
+            new Path(targetTestRoot, "user/foo"));
+    ContractTestUtils.assertIsFile(fcView2,
+        fileContextTestHelper.getTestRootPath(fcView2, user2Path));
+    ContractTestUtils
+        .assertIsFile(fcTarget, new Path(targetTestRoot, "user/fooBarBar"));
+  }
+
+  // RenameStrategy SAME_FILESYSTEM_ACROSS_MOUNTPOINT enabled
+  // to rename across mount points if the mount link targets are different
+  // but are part of the same target FS
+  @Test
+  public void testRenameAcrossMounts4() throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
+        ViewFileSystem.RenameStrategy.SAME_FILESYSTEM_ACROSS_MOUNTPOINT
+            .toString());
+    FileContext fcView2 =
+        FileContext.getFileContext(FsConstants.VIEWFS_URI, conf2);
+    String userPath = "/user/foo";
+    fileContextTestHelper.createFile(fcView2, userPath);
+    String anotherMountPath = "/data/fooBar";
+    Path anotherDst = new Path(anotherMountPath);
+    fcView2.rename(new Path(userPath), anotherDst);
+
+    ContractTestUtils
+        .assertPathDoesNotExist(fcView2, "src should not exist after rename",
+            new Path(userPath));
+    ContractTestUtils
+        .assertPathDoesNotExist(fcTarget, "src should not exist after rename",
+            new Path(targetTestRoot, "user/foo"));
+    ContractTestUtils.assertIsFile(fcView2,
+        fileContextTestHelper.getTestRootPath(fcView2, anotherMountPath));
+    ContractTestUtils
+        .assertIsFile(fcTarget, new Path(targetTestRoot, "data/fooBar"));
+  }
+
   static protected boolean SupportsBlocks = false; //  local fs use 1 block
                                                    // override for HDFS
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d983cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index 58b77f6..b8f5379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -247,4 +249,24 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     Assert.assertTrue("File checksum not matching!",
         fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
   }
+
+  // Rename should fail across different filesystems
+  @Test
+  public void testRenameAccorssFilesystem() throws IOException {
+    // /data is a mountpoint on nn1
+    Path mountDataRootPath = new Path("/data");
+    // /mountOnNn2 is a mountpoint on nn2
+    Path fsTargetFilePath = new Path("/mountOnNn2");
+    Path filePath = new Path(mountDataRootPath + "/ttest");
+    Path hdfFilepath = new Path(fsTargetFilePath + "/ttest2");
+    fsView.create(filePath);
+    try {
+      fsView.rename(filePath, hdfFilepath);
+      ContractTestUtils.fail("Should thrown IOE on Renames across filesytems");
+    } catch (IOException e) {
+      GenericTestUtils
+          .assertExceptionContains("Renames across Mount points not supported",
+              e);
+    }
+  }
 }
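
As a sketch of what the new strategy setting enables (assuming a view filesystem whose mount table is configured elsewhere, with /user and /user2 both linking to the same target URI):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsConstants;
    import org.apache.hadoop.fs.Path;

    public class ViewFsRenameExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Mount links (fs.viewfs.mounttable.default.link./user, ...) are
        // assumed to be configured elsewhere.
        conf.set("fs.viewfs.rename.strategy",
            "SAME_TARGET_URI_ACROSS_MOUNTPOINT");
        FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
        // Succeeds only when both mount links resolve to the same target URI;
        // under the default SAME_MOUNTPOINT strategy the same call throws
        // "Renames across Mount points not supported".
        viewFs.rename(new Path("/user/foo"), new Path("/user2/foo"));
      }
    }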




[45/50] [abbrv] hadoop git commit: YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung

Posted by xg...@apache.org.
YARN-5946: Create YarnConfigurationStore interface and
InMemoryConfigurationStore class. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad0caa25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad0caa25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad0caa25

Branch: refs/heads/YARN-5734
Commit: ad0caa25a553ed72c3806165b4edb0126133d4a5
Parents: 512f5c9
Author: Xuan <xg...@apache.org>
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Mon Jul 31 08:54:55 2017 -0700

----------------------------------------------------------------------
 .../conf/InMemoryConfigurationStore.java        |  86 +++++++++++
 .../capacity/conf/YarnConfigurationStore.java   | 154 +++++++++++++++++++
 .../conf/TestYarnConfigurationStore.java        |  70 +++++++++
 3 files changed, 310 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad0caa25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 0000000..a208fb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+  private Configuration schedConf;
+  private LinkedList<LogMutation> pendingMutations;
+  private long pendingId;
+
+  @Override
+  public void initialize(Configuration conf, Configuration schedConf) {
+    this.schedConf = schedConf;
+    this.pendingMutations = new LinkedList<>();
+    this.pendingId = 0;
+  }
+
+  @Override
+  public synchronized long logMutation(LogMutation logMutation) {
+    logMutation.setId(++pendingId);
+    pendingMutations.add(logMutation);
+    return pendingId;
+  }
+
+  @Override
+  public synchronized boolean confirmMutation(long id, boolean isValid) {
+    LogMutation mutation = pendingMutations.poll();
+    // If confirmMutation is called out of order, discard mutations until id
+    // is reached.
+    while (mutation != null) {
+      if (mutation.getId() == id) {
+        if (isValid) {
+          Map<String, String> mutations = mutation.getUpdates();
+          for (Map.Entry<String, String> kv : mutations.entrySet()) {
+            schedConf.set(kv.getKey(), kv.getValue());
+          }
+        }
+        return true;
+      }
+      mutation = pendingMutations.poll();
+    }
+    return false;
+  }
+
+  @Override
+  public synchronized Configuration retrieve() {
+    return schedConf;
+  }
+
+  @Override
+  public synchronized List<LogMutation> getPendingMutations() {
+    return pendingMutations;
+  }
+
+  @Override
+  public List<LogMutation> getConfirmedConfHistory(long fromId) {
+    // Unimplemented.
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad0caa25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
new file mode 100644
index 0000000..22c0ef8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * YarnConfigurationStore exposes the methods needed for retrieving and
+ * YarnConfigurationStore exposes the methods needed for retrieving and
+ * persisting {@link CapacityScheduler} configuration as key-value pairs,
+ * using write-ahead logging. When a configuration mutation is requested, the
+ * caller should first log it with {@code logMutation}, which persists the
+ * pending mutation. The mutation is merged into the persisted configuration
+ * only after {@code confirmMutation} is called.
+ *
+ * On startup/recovery, caller should call {@code retrieve} to get all
+ * confirmed mutations, then get pending mutations which were not confirmed via
+ * {@code getPendingMutations}, and replay/confirm them via
+ * {@code confirmMutation} as in the normal case.
+ */
+public interface YarnConfigurationStore {
+
+  /**
+   * LogMutation encapsulates the fields needed for configuration mutation
+   * audit logging and recovery.
+   */
+  class LogMutation {
+    private Map<String, String> updates;
+    private String user;
+    private long id;
+
+    /**
+     * Create log mutation prior to logging.
+     * @param updates key-value configuration updates
+     * @param user user who requested configuration change
+     */
+    public LogMutation(Map<String, String> updates, String user) {
+      this(updates, user, 0);
+    }
+
+    /**
+     * Create log mutation for recovery.
+     * @param updates key-value configuration updates
+     * @param user user who requested configuration change
+     * @param id transaction id of configuration change
+     */
+    LogMutation(Map<String, String> updates, String user, long id) {
+      this.updates = updates;
+      this.user = user;
+      this.id = id;
+    }
+
+    /**
+     * Get key-value configuration updates.
+     * @return map of configuration updates
+     */
+    public Map<String, String> getUpdates() {
+      return updates;
+    }
+
+    /**
+     * Get user who requested configuration change.
+     * @return user who requested configuration change
+     */
+    public String getUser() {
+      return user;
+    }
+
+    /**
+     * Get transaction id of this configuration change.
+     * @return transaction id
+     */
+    public long getId() {
+      return id;
+    }
+
+    /**
+     * Set transaction id of this configuration change.
+     * @param id transaction id
+     */
+    public void setId(long id) {
+      this.id = id;
+    }
+  }
+
+  /**
+   * Initialize the configuration store.
+   * @param conf configuration to initialize store with
+   * @param schedConf initial key-value configuration to persist
+   */
+  void initialize(Configuration conf, Configuration schedConf);
+
+  /**
+   * Logs the configuration change to the backing store. Generates an id
+   * associated with this mutation, sets it in {@code logMutation}, and returns it.
+   * @param logMutation configuration change to be persisted in the write-ahead log
+   * @return id which configuration store associates with this mutation
+   */
+  long logMutation(LogMutation logMutation);
+
+  /**
+   * Should be called after {@code logMutation}. Gets the pending mutation
+   * associated with {@code id} and marks the mutation as persisted (no longer
+   * pending). If {@code isValid} is true, merges the mutation into the
+   * persisted configuration.
+   *
+   * If {@code confirmMutation} is called with ids in a different order than
+   * was returned by {@code logMutation}, the result is implementation
+   * dependent.
+   * @param id id of mutation to be confirmed
+   * @param isValid if true, update persisted configuration with mutation
+   *                associated with {@code id}.
+   * @return true on success
+   */
+  boolean confirmMutation(long id, boolean isValid);
+
+  /**
+   * Retrieve the persisted configuration.
+   * @return configuration as key-value
+   */
+  Configuration retrieve();
+
+  /**
+   * Get the list of pending mutations, in the order they were logged.
+   * @return list of mutations
+   */
+  List<LogMutation> getPendingMutations();
+
+  /**
+   * Get a list of confirmed configuration mutations starting from a given id.
+   * @param fromId id from which to start getting mutations, inclusive
+   * @return list of configuration mutations
+   */
+  List<LogMutation> getConfirmedConfHistory(long fromId);
+}
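
A minimal sketch of an implementation satisfying this contract may help when
reading the test below. This is illustrative only: it is not the
InMemoryConfigurationStore that the test exercises, the class name and its
heap-only bookkeeping are assumptions, and only the method signatures come
from the interface above.

package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;

import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;

public class SketchConfigurationStore implements YarnConfigurationStore {
  private Configuration schedConf;
  private final List<LogMutation> pending = new LinkedList<>();
  private long nextId = 1;

  @Override
  public void initialize(Configuration conf, Configuration schedConf) {
    this.schedConf = schedConf;
  }

  @Override
  public long logMutation(LogMutation logMutation) {
    logMutation.setId(nextId++);  // assign a transaction id
    pending.add(logMutation);     // write-ahead: recorded as pending first
    return logMutation.getId();
  }

  @Override
  public boolean confirmMutation(long id, boolean isValid) {
    for (Iterator<LogMutation> it = pending.iterator(); it.hasNext();) {
      LogMutation m = it.next();
      if (m.getId() != id) {
        continue;
      }
      if (isValid) {              // merge confirmed updates into the config
        for (Map.Entry<String, String> kv : m.getUpdates().entrySet()) {
          schedConf.set(kv.getKey(), kv.getValue());
        }
      }
      it.remove();                // confirmed or rejected: no longer pending
      return true;
    }
    return false;                 // unknown id
  }

  @Override
  public Configuration retrieve() {
    return schedConf;
  }

  @Override
  public List<LogMutation> getPendingMutations() {
    return new LinkedList<>(pending);
  }

  @Override
  public List<LogMutation> getConfirmedConfHistory(long fromId) {
    return Collections.emptyList();  // history not tracked in this sketch
  }
}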

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad0caa25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
new file mode 100644
index 0000000..dff4e77
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+public class TestYarnConfigurationStore {
+
+  private YarnConfigurationStore confStore;
+  private Configuration schedConf;
+
+  private static final String testUser = "testUser";
+
+  @Before
+  public void setUp() {
+    schedConf = new Configuration(false);
+    schedConf.set("key1", "val1");
+  }
+
+  @Test
+  public void testInMemoryConfigurationStore() {
+    confStore = new InMemoryConfigurationStore();
+    confStore.initialize(new Configuration(), schedConf);
+    assertEquals("val1", confStore.retrieve().get("key1"));
+
+    Map<String, String> update1 = new HashMap<>();
+    update1.put("keyUpdate1", "valUpdate1");
+    LogMutation mutation1 = new LogMutation(update1, testUser);
+    long id = confStore.logMutation(mutation1);
+    assertEquals(1, confStore.getPendingMutations().size());
+    confStore.confirmMutation(id, true);
+    assertEquals("valUpdate1", confStore.retrieve().get("keyUpdate1"));
+    assertEquals(0, confStore.getPendingMutations().size());
+
+    Map<String, String> update2 = new HashMap<>();
+    update2.put("keyUpdate2", "valUpdate2");
+    LogMutation mutation2 = new LogMutation(update2, testUser);
+    id = confStore.logMutation(mutation2);
+    assertEquals(1, confStore.getPendingMutations().size());
+    confStore.confirmMutation(id, false);
+    assertNull("Configuration should not be updated",
+        confStore.retrieve().get("keyUpdate2"));
+    assertEquals(0, confStore.getPendingMutations().size());
+  }
+}
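
The test above covers the steady-state log/confirm cycle; the startup/recovery
path described in the interface javadoc would look roughly like the sketch
below. Here validate() is a hypothetical stand-in for whatever admission check
the caller applies to a replayed mutation; it is not part of this patch.

// Recovery replay per the YarnConfigurationStore contract.
Configuration current = confStore.retrieve();    // confirmed state only
for (LogMutation m : confStore.getPendingMutations()) {
  boolean isValid = validate(m, current);        // hypothetical re-check
  confStore.confirmMutation(m.getId(), isValid);
}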




[31/50] [abbrv] hadoop git commit: HADOOP-14678. AdlFilesystem#initialize swallows exception when getting user name. Contributed by John Zhuge.

Posted by xg...@apache.org.
HADOOP-14678. AdlFilesystem#initialize swallows exception when getting user name. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f735ad1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f735ad1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f735ad1b

Branch: refs/heads/YARN-5734
Commit: f735ad1b67ed82d9b11b1afd7ae39035a6aed18b
Parents: 369f731
Author: John Zhuge <jz...@cloudera.com>
Authored: Sat Jul 22 12:51:37 2017 -0700
Committer: John Zhuge <jz...@apache.org>
Committed: Fri Jul 28 09:46:54 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java      | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f735ad1b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index e63f115..0de538e 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -129,6 +129,8 @@ public class AdlFileSystem extends FileSystem {
       userName = UserGroupInformation.getCurrentUser().getShortUserName();
     } catch (IOException e) {
       userName = "hadoop";
+      LOG.warn("Got exception when getting Hadoop user name."
+          + " Set the user name to '" + userName + "'.", e);
     }
 
     this.setWorkingDirectory(getHomeDirectory());




[15/50] [abbrv] hadoop git commit: YARN-5548. Use MockRMMemoryStateStore to reduce test failures (Bibin A Chundatt via Varun Saxena)

Posted by xg...@apache.org.
YARN-5548. Use MockRMMemoryStateStore to reduce test failures (Bibin A Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f66fd11e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f66fd11e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f66fd11e

Branch: refs/heads/YARN-5734
Commit: f66fd11e514fb326fd1f37a88b444a5276f0947b
Parents: 27a1a5f
Author: Varun Saxena <va...@apache.org>
Authored: Thu Jul 27 23:14:50 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Thu Jul 27 23:14:50 2017 +0530

----------------------------------------------------------------------
 .../v2/app/rm/TestRMContainerAllocator.java     |   7 +-
 .../yarn/server/resourcemanager/MockRM.java     |  14 +-
 .../resourcemanager/TestApplicationCleanup.java |  30 +--
 .../TestContainerResourceUsage.java             |   8 +-
 .../TestNodeBlacklistingOnAMFailures.java       |   8 +-
 .../yarn/server/resourcemanager/TestRMHA.java   |  12 +-
 .../server/resourcemanager/TestRMRestart.java   | 228 ++++++++-----------
 .../TestWorkPreservingRMRestart.java            | 117 ++++------
 .../applicationsmanager/TestAMRestart.java      |  29 +--
 .../rmapp/TestApplicationLifetimeMonitor.java   |   7 +-
 .../scheduler/TestAbstractYarnScheduler.java    |   7 +-
 .../capacity/TestApplicationPriority.java       |  18 +-
 .../capacity/TestCapacityScheduler.java         |  23 +-
 ...TestWorkPreservingRMRestartForNodeLabel.java |  10 +-
 .../scheduler/fair/TestFairScheduler.java       |   8 +-
 .../security/TestDelegationTokenRenewer.java    |   5 +-
 .../security/TestRMDelegationTokens.java        |   9 +-
 17 files changed, 199 insertions(+), 341 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 8879362..bc05c62 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -2414,10 +2414,7 @@ public class TestRMContainerAllocator {
     conf.setInt(
         MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
-    MyResourceManager rm1 = new MyResourceManager(conf, memStore);
+    MyResourceManager rm1 = new MyResourceManager(conf);
     rm1.start();
 
     // Submit the application
@@ -2504,7 +2501,7 @@ public class TestRMContainerAllocator {
     assertBlacklistAdditionsAndRemovals(0, 0, rm1);
 
     // Phase-2 start 2nd RM is up
-    MyResourceManager rm2 = new MyResourceManager(conf, memStore);
+    MyResourceManager rm2 = new MyResourceManager(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     allocator.updateSchedulerProxy(rm2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 5a215e5..e967807 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -102,8 +102,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
-
-
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.log4j.Level;
@@ -174,14 +172,6 @@ public class MockRM extends ResourceManager {
     disableDrainEventsImplicitly = false;
   }
 
-  public class MockRMMemoryStateStore extends MemoryRMStateStore {
-    @SuppressWarnings("rawtypes")
-    @Override
-    protected EventHandler getRMStateStoreEventHandler() {
-      return rmStateStoreEventHandler;
-    }
-  }
-
   public class MockRMNullStateStore extends NullRMStateStore {
     @SuppressWarnings("rawtypes")
     @Override
@@ -1294,4 +1284,8 @@ public class MockRM extends ResourceManager {
       ((AsyncDispatcher) getRmDispatcher()).disableExitOnDispatchException();
     }
   }
+
+  public RMStateStore getRMStateStore() {
+    return getRMContext().getStateStore();
+  }
 }
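
The getRMStateStore() accessor added above is what lets the restart tests in
the hunks below drop their hand-built stores. Schematically (a sketch
distilled from this patch's changes, not new API):

// Before YARN-5548: each test wired up the state store by hand.
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
MockRM oldStyleRm = new MockRM(conf, memStore);

// After: MockRM builds its own recoverable store, and the "restarted"
// RM simply reuses the first RM's store instance.
MockRM rm1 = new MockRM(conf);
rm1.start();
// ... submit apps, run containers ...
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
rm2.start();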

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
index 422b7eb..ebca7a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
@@ -153,7 +153,6 @@ public class TestApplicationCleanup {
     rm.stop();
   }
 
-  @SuppressWarnings("resource")
   @Test
   public void testContainerCleanup() throws Exception {
 
@@ -291,11 +290,8 @@ public class TestApplicationCleanup {
   @Test (timeout = 60000)
   public void testAppCleanupWhenRMRestartedAfterAppFinished() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
     // start RM
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -308,7 +304,7 @@ public class TestApplicationCleanup {
     rm1.waitForState(app0.getApplicationId(), RMAppState.FAILED);
 
     // start new RM
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     
     // nm1 register to rm2, and do a heartbeat
@@ -327,11 +323,9 @@ public class TestApplicationCleanup {
   @Test(timeout = 60000)
   public void testAppCleanupWhenRMRestartedBeforeAppFinished() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
     // start RM
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 1024, rm1.getResourceTrackerService());
@@ -357,7 +351,7 @@ public class TestApplicationCleanup {
     }
 
     // start new RM
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
 
     // nm1/nm2 register to rm2, and do a heartbeat
@@ -383,16 +377,12 @@ public class TestApplicationCleanup {
     rm2.stop();
   }
 
-  @SuppressWarnings("resource")
   @Test (timeout = 60000)
   public void testContainerCleanupWhenRMRestartedAppNotRegistered() throws
       Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
     // start RM
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -405,7 +395,7 @@ public class TestApplicationCleanup {
     rm1.waitForState(app0.getApplicationId(), RMAppState.RUNNING);
 
     // start new RM
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
 
     // nm1 register to rm2, and do a heartbeat
@@ -426,11 +416,9 @@ public class TestApplicationCleanup {
   @Test (timeout = 60000)
   public void testAppCleanupWhenNMReconnects() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
     // start RM
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -466,11 +454,9 @@ public class TestApplicationCleanup {
   @Test(timeout = 60000)
   public void testProcessingNMContainerStatusesOnNMRestart() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
     // 1. Start the cluster-RM,NM,Submit app with 1024MB,Launch & register AM
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     int nmMemory = 8192;
     int amMemory = 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
index 3db00a2..ba9de6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
@@ -138,10 +138,8 @@ public class TestContainerResourceUsage {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
     conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
     conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
-    MockRM rm0 = new MockRM(conf, memStore);
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    MockRM rm0 = new MockRM(conf);
     rm0.start();
     MockNM nm =
         new MockNM("127.0.0.1:1234", 65536, rm0.getResourceTrackerService());
@@ -229,7 +227,7 @@ public class TestContainerResourceUsage {
         vcoreSeconds, metricsBefore.getVcoreSeconds());
 
     // create new RM to represent RM restart. Load up the state store.
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf, rm0.getRMStateStore());
     rm1.start();
     RMApp app0After =
         rm1.getRMContext().getRMApps().get(app0.getApplicationId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
index 75ef5c7..5266210 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
@@ -413,11 +413,9 @@ public class TestNodeBlacklistingOnAMFailures {
   }
 
   private MockRM startRM(YarnConfiguration conf) {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
-    MockRM rm = new MockRM(conf, memStore);
-
+    conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    MockRM rm = new MockRM(conf);
     rm.start();
     return rm;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index ec6b1e6..b5293a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -415,7 +415,7 @@ public class TestRMHA {
     configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
     Configuration conf = new YarnConfiguration(configuration);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore() {
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
       int count = 0;
 
       @Override
@@ -465,7 +465,7 @@ public class TestRMHA {
     configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
     Configuration conf = new YarnConfiguration(configuration);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore() {
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
       @Override
       public void updateApplicationState(ApplicationStateData appState) {
         notifyStoreOperationFailed(new StoreFencedException());
@@ -530,12 +530,10 @@ public class TestRMHA {
     configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
     configuration.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
     Configuration conf = new YarnConfiguration(configuration);
-
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
 
     // 1. start RM
-    rm = new MockRM(conf, memStore);
+    rm = new MockRM(conf);
     rm.init(conf);
     rm.start();
 
@@ -565,7 +563,7 @@ public class TestRMHA {
     verifyClusterMetrics(0, 0, 0, 0, 0, 0);
 
     // 3. Create new RM
-    rm = new MockRM(conf, memStore) {
+    rm = new MockRM(conf, rm.getRMStateStore()) {
       @Override
       protected ResourceTrackerService createResourceTrackerService() {
         return new ResourceTrackerService(this.rmContext,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 955b4b6..f9d0eae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -178,24 +178,23 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     return rm;
   }
 
-  @SuppressWarnings("rawtypes")
+  private MockRM createMockRM(YarnConfiguration config) {
+    MockRM rm = new MockRM(config);
+    rms.add(rm);
+    return rm;
+  }
+
   @Test (timeout=180000)
   public void testRMRestart() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
         YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
+    // PHASE 1: create RM and get state
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-                                                  rmState.getApplicationState();
-    
-    
-    // PHASE 1: create state in an RM
-    
-    // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
-    
+        memStore.getState().getApplicationState();
+
     // start like normal because state is empty
     rm1.start();
     
@@ -451,14 +450,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   public void testRMRestartAppRunningAMFailed() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
       YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
-    Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
 
-    // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
+    // Create RM
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
+    Map<ApplicationId, ApplicationStateData> rmAppState =
+        memStore.getState().getApplicationState();
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -508,14 +505,13 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     // be started immediately.
     YarnConfiguration conf = new YarnConfiguration(this.conf);
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 40);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
+
+    // create RM
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
-    
+        memStore.getState().getApplicationState();
     // start RM
-    final MockRM rm1 = createMockRM(conf, memStore);
     rm1.start();
     AbstractYarnScheduler ys =
         (AbstractYarnScheduler)rm1.getResourceScheduler();
@@ -674,7 +670,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   @Test (timeout = 60000)
   public void testRMRestartWaitForPreviousSucceededAttempt() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    MemoryRMStateStore memStore = new MemoryRMStateStore() {
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
       int count = 0;
 
       @Override
@@ -727,14 +723,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   @Test (timeout = 60000)
   public void testRMRestartFailedApp() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
+    // create RM
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
-
+        memStore.getState().getApplicationState();
     // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -775,14 +769,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   public void testRMRestartKilledApp() throws Exception{
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
       YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
+    // create RM
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
-
+        memStore.getState().getApplicationState();
     // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -823,7 +815,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 
   @Test (timeout = 60000)
   public void testRMRestartKilledAppWithNoAttempts() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore() {
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
       @Override
       public synchronized void storeApplicationAttemptStateInternal(
           ApplicationAttemptId attemptId,
@@ -865,14 +857,13 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   public void testRMRestartSucceededApp() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
       YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
+    // PHASE 1: create RM and get state
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
+        memStore.getState().getApplicationState();
 
-    // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
+    // start like normal because state is empty
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -913,11 +904,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   @Test (timeout = 60000)
   public void testRMRestartGetApplicationList() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
     // start RM
-    MockRM rm1 = new MockRM(conf, memStore) {
+    MockRM rm1 = new MockRM(conf) {
       @Override
       protected SystemMetricsPublisher createSystemMetricsPublisher() {
         return spy(super.createSystemMetricsPublisher());
@@ -956,7 +944,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     .appCreated(any(RMApp.class), anyLong());
     // restart rm
 
-    MockRM rm2 = new MockRM(conf, memStore) {
+    MockRM rm2 = new MockRM(conf, rm1.getRMStateStore()) {
       @Override
       protected RMAppManager createRMAppManager() {
         return spy(super.createRMAppManager());
@@ -1077,13 +1065,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
         YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
-
+    // create RM
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();  
-    MockRM rm1 = createMockRM(conf, memStore);
+        memStore.getState().getApplicationState();
+    // start RM
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1146,16 +1133,15 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   public void testRMRestartTimelineCollectorContext() throws Exception {
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
     conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
-    Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
+
     MockRM rm1 = null;
     MockRM rm2 = null;
     try {
-      rm1 = createMockRM(conf, memStore);
+      rm1 = createMockRM(conf);
       rm1.start();
+      MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
+      Map<ApplicationId, ApplicationStateData> rmAppState =
+          memStore.getState().getApplicationState();
       MockNM nm1 =
           new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
       nm1.registerNode();
@@ -1212,13 +1198,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
         "kerberos");
     UserGroupInformation.setConfiguration(conf);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
-
+    // create RM
+    MockRM rm1 = new TestSecurityMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
-    MockRM rm1 = new TestSecurityMockRM(conf, memStore);
+        memStore.getState().getApplicationState();
+    // start RM
     rm1.start();
 
     HashSet<Token<RMDelegationTokenIdentifier>> tokenSet =
@@ -1307,13 +1292,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
       "kerberos");
     UserGroupInformation.setConfiguration(conf);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
-
+    // create RM
+    MockRM rm1 = new TestSecurityMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
-    MockRM rm1 = new TestSecurityMockRM(conf, memStore);
+        memStore.getState().getApplicationState();
+    // start RM
     rm1.start();
     MockNM nm1 =
         new MockNM("0.0.0.0:4321", 15120, rm1.getResourceTrackerService());
@@ -1388,8 +1372,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
         "kerberos");
     conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
     UserGroupInformation.setConfiguration(conf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
+
+    MockRM rm1 = new TestSecurityMockRM(conf);
+    rm1.start();
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     RMState rmState = memStore.getState();
 
     Map<ApplicationId, ApplicationStateData> rmAppState =
@@ -1399,10 +1385,6 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     Set<DelegationKey> rmDTMasterKeyState =
         rmState.getRMDTSecretManagerState().getMasterKeyState();
 
-    MockRM rm1 = new TestSecurityMockRM(conf, memStore);
-
-    rm1.start();
-
     // create an empty credential
     Credentials ts = new Credentials();
 
@@ -1537,10 +1519,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
         "kerberos");
     conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
     UserGroupInformation.setConfiguration(conf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
-    MockRM rm1 = new TestSecurityMockRM(conf, memStore);
+    MockRM rm1 = new TestSecurityMockRM(conf);
     rm1.start();
 
     GetDelegationTokenRequest request1 =
@@ -1553,7 +1533,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
         ConverterUtils.convertFromYarn(response1.getRMDelegationToken(), rmAddr);
 
     // start new RM
-    MockRM rm2 = new TestSecurityMockRM(conf, memStore);
+    MockRM rm2 = new TestSecurityMockRM(conf, rm1.getRMStateStore());
     rm2.start();
 
     // submit an app with the old delegation token got from previous RM.
@@ -1631,14 +1611,13 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 
   @Test (timeout = 60000)
   public void testFinishedAppRemovalAfterRMRestart() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
     conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 1);
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
 
     // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
+    MockRM rm1 = createMockRM(conf);
     rm1.start();
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
+    RMState rmState = memStore.getState();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
     nm1.registerNode();
@@ -1676,7 +1655,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   // This is to test RM does not get hang on shutdown.
   @Test (timeout = 10000)
   public void testRMShutdown() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore() {
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
       @Override
       public synchronized void checkVersion()
           throws Exception {
@@ -1742,10 +1721,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
       "kerberos");
     UserGroupInformation.setConfiguration(conf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
-    MockRM rm1 = new TestSecurityMockRM(conf, memStore) {
+    MockRM rm1 = new TestSecurityMockRM(conf) {
       class TestDelegationTokenRenewer extends DelegationTokenRenewer {
         public void addApplicationAsync(ApplicationId applicationId, Credentials ts,
             boolean shouldCancelAtEnd, String user, Configuration appConf) {
@@ -1758,6 +1734,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
       }
     };
     rm1.start();
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     RMApp app1 = null;
     try {
        app1 = rm1.submitApp(200, "name", "user",
@@ -1781,7 +1758,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 
   @Test (timeout = 20000)
   public void testAppRecoveredInOrderOnRMRestart() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore();
     memStore.init(conf);
 
     for (int i = 10; i > 0; i--) {
@@ -1836,12 +1813,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   public void testQueueMetricsOnRMRestart() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
         YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
-    // PHASE 1: create state in an RM
     // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
+    MockRM rm1 = createMockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1879,7 +1852,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 
     // PHASE 2: create new RM and start from old state
     // create new RM to represent restart and recover state
-    MockRM rm2 = createMockRM(conf, memStore);
+    MockRM rm2 = createMockRM(conf, rm1.getRMStateStore());
     QueueMetrics qm2 = rm2.getResourceScheduler().getRootQueueMetrics();
     resetQueueMetrics(qm2);
     assertQueueMetrics(qm2, 0, 0, 0, 0);
@@ -1960,7 +1933,6 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 
   @Test (timeout = 60000)
   public void testDecomissionedNMsMetricsOnRMRestart() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
     conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
       hostFile.getAbsolutePath());
     writeToHostsFile("");
@@ -2039,11 +2011,9 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
       "kerberos");
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
     // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
+    MockRM rm1 = createMockRM(conf);
     rm1.start();
     final MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -2051,7 +2021,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     RMApp app0 = rm1.submitApp(200);
     final MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
 
-    MockRM rm2 = new MockRM(conf, memStore) {
+    MockRM rm2 = new MockRM(conf, rm1.getRMStateStore()) {
       @Override
       protected ResourceTrackerService createResourceTrackerService() {
         return new ResourceTrackerService(this.rmContext,
@@ -2158,6 +2128,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
       super(conf, store);
     }
 
+    public TestSecurityMockRM(Configuration conf) {
+      super(conf);
+    }
+
     @Override
     public void init(Configuration conf) {
       // reset localServiceAddress.
@@ -2208,10 +2182,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
         nodeLabelFsStoreDirURI);
     
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
-    MockRM rm1 = new MockRM(conf, memStore) {
+    MockRM rm1 = new MockRM(conf) {
       @Override
       protected RMNodeLabelsManager createNodeLabelManager() {
         RMNodeLabelsManager mgr = new RMNodeLabelsManager();
@@ -2261,7 +2233,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     Assert.assertEquals(1, nodeLabelManager.getNodeLabels().size());
     Assert.assertTrue(nodeLabels.get(n1).equals(toSet("y")));
 
-    MockRM rm2 = new MockRM(conf, memStore) {
+    MockRM rm2 = new MockRM(conf, rm1.getRMStateStore()) {
       @Override
       protected RMNodeLabelsManager createNodeLabelManager() {
         RMNodeLabelsManager mgr = new RMNodeLabelsManager();
@@ -2290,14 +2262,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     int maxAttempt =
         conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
             YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
+    // create RM
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     Map<ApplicationId, ApplicationStateData> rmAppState =
-        rmState.getApplicationState();
-
+        memStore.getState().getApplicationState();
     // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -2365,10 +2335,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
         nodeLabelFsStoreDirURI);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
-    MockRM rm1 = new MockRM(conf, memStore) {
+    MockRM rm1 = new MockRM(conf) {
       @Override
       protected RMNodeLabelsManager createNodeLabelManager() {
         RMNodeLabelsManager mgr = new RMNodeLabelsManager();
@@ -2396,7 +2364,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     nodeLabelManager.replaceLabelsOnNode(ImmutableMap.of(n1, toSet("x")));
     MockRM rm2 = null;
     for (int i = 0; i < 2; i++) {
-      rm2 = new MockRM(conf, memStore) {
+      rm2 = new MockRM(conf, rm1.getRMStateStore()) {
         @Override
         protected RMNodeLabelsManager createNodeLabelManager() {
           RMNodeLabelsManager mgr = new RMNodeLabelsManager();
@@ -2419,15 +2387,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 
   @Test(timeout = 120000)
   public void testRMRestartAfterPreemption() throws Exception {
-    Configuration conf = new Configuration();
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
     if (!getSchedulerType().equals(SchedulerType.CAPACITY)) {
       return;
     }
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
     // start RM
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
 
@@ -2466,7 +2431,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     MockRM rm2 = null;
     // start RM2
     try {
-      rm2 = new MockRM(conf, memStore);
+      rm2 = new MockRM(conf, rm1.getRMStateStore());
       rm2.start();
       Assert.assertTrue("RM start successfully", true);
     } catch (Exception e) {
@@ -2480,11 +2445,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   @Test(timeout = 60000)
   public void testRMRestartOnMissingAttempts() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 5);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
+    // create RM
+    MockRM rm1 = createMockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     // start RM
-    MockRM rm1 = createMockRM(conf, memStore);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -2540,13 +2504,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 
   @Test(timeout = 60000)
   public void testRMRestartAfterNodeLabelDisabled() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
 
     MockRM rm1 = new MockRM(
-        TestUtils.getConfigurationWithDefaultQueueLabels(conf), memStore) {
+        TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
       @Override
       protected RMNodeLabelsManager createNodeLabelManager() {
         RMNodeLabelsManager mgr = new RMNodeLabelsManager();
@@ -2580,7 +2541,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     // restart rm with node label disabled
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false);
     MockRM rm2 = new MockRM(
-        TestUtils.getConfigurationWithDefaultQueueLabels(conf), memStore) {
+        TestUtils.getConfigurationWithDefaultQueueLabels(conf),
+        rm1.getRMStateStore()) {
       @Override
       protected RMNodeLabelsManager createNodeLabelManager() {
         RMNodeLabelsManager mgr = new RMNodeLabelsManager();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index c4cf256..d028227 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -148,9 +148,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     int containerMemory = 1024;
     Resource containerResource = Resource.newInstance(containerMemory, 1);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -162,7 +160,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     rm1.clearQueueMetrics(app1);
 
     // Re-start RM
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     // recover app
@@ -296,9 +294,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     int containerMemory = 1024;
     Resource containerResource = Resource.newInstance(containerMemory, 1);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(schedulerConf);
-    rm1 = new MockRM(schedulerConf, memStore);
+    rm1 = new MockRM(schedulerConf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -316,7 +312,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     rm1.clearQueueMetrics(app1);
 
     // 3. Fail over (restart) RM.
-    rm2 = new MockRM(schedulerConf, memStore);
+    rm2 = new MockRM(schedulerConf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     // 4. Validate app is recovered post failover.
@@ -570,9 +566,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   public void testRMRestartWithRemovedQueue() throws Exception{
     conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
     conf.set(YarnConfiguration.YARN_ADMIN_ACL, "");
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -585,7 +579,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[]{QUEUE_DOESNT_EXIST});
     final String noQueue = CapacitySchedulerConfiguration.ROOT + "." + QUEUE_DOESNT_EXIST;
     csConf.setCapacity(noQueue, 100);
-    rm2 = new MockRM(csConf,memStore);
+    rm2 = new MockRM(csConf, rm1.getRMStateStore());
 
     rm2.start();
     UserGroupInformation user2 = UserGroupInformation.createRemoteUser("user2");
@@ -622,9 +616,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     CapacitySchedulerConfiguration csConf =
         new CapacitySchedulerConfiguration(conf);
     setupQueueConfiguration(csConf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(csConf);
-    rm1 = new MockRM(csConf, memStore);
+    rm1 = new MockRM(csConf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -648,7 +640,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     csConf.set(PREFIX + "root.Default.QueueB.state", "STOPPED");
 
     // Re-start RM
-    rm2 = new MockRM(csConf, memStore);
+    rm2 = new MockRM(csConf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     nm2.setResourceTrackerService(rm2.getResourceTrackerService());
@@ -783,9 +775,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     CapacitySchedulerConfiguration csConf =
         new CapacitySchedulerConfiguration(conf);
     setupQueueConfiguration(csConf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(csConf);
-    rm1 = new MockRM(csConf, memStore);
+    rm1 = new MockRM(csConf);
     rm1.start();
     MockNM nm =
         new MockNM("127.1.1.1:4321", 8192, rm1.getResourceTrackerService());
@@ -798,7 +788,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
         getYarnApplicationState(), YarnApplicationState.RUNNING);
 
     // Take a copy of state store so that it can be reset to this state.
-    RMState state = memStore.loadState();
+    RMState state = rm1.getRMStateStore().loadState();
 
     // Change scheduler config with child queues added to QueueB.
     csConf = new CapacitySchedulerConfiguration(conf);
@@ -806,7 +796,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
 
     String diags = "Application killed on recovery as it was submitted to " +
         "queue QueueB which is no longer a leaf queue after restart.";
-    verifyAppRecoveryWithWrongQueueConfig(csConf, app, diags, memStore, state);
+    verifyAppRecoveryWithWrongQueueConfig(csConf, app, diags,
+        (MemoryRMStateStore) rm1.getRMStateStore(), state);
   }
 
   //Test behavior of an app if queue is removed during recovery. Test case does
@@ -829,9 +820,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     CapacitySchedulerConfiguration csConf =
         new CapacitySchedulerConfiguration(conf);
     setupQueueConfiguration(csConf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(csConf);
-    rm1 = new MockRM(csConf, memStore);
+    rm1 = new MockRM(csConf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -860,7 +849,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     rm1.clearQueueMetrics(app2);
 
     // Take a copy of state store so that it can be reset to this state.
-    RMState state = memStore.loadState();
+    RMState state = rm1.getRMStateStore().loadState();
 
     // Set new configuration with QueueB removed.
     csConf = new CapacitySchedulerConfiguration(conf);
@@ -868,7 +857,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
 
     String diags = "Application killed on recovery as it was submitted to " +
         "queue QueueB which no longer exists after restart.";
-    verifyAppRecoveryWithWrongQueueConfig(csConf, app2, diags, memStore, state);
+    verifyAppRecoveryWithWrongQueueConfig(csConf, app2, diags,
+        (MemoryRMStateStore) rm1.getRMStateStore(), state);
   }
 
   private void checkParentQueue(ParentQueue parentQueue, int numContainers,
@@ -883,10 +873,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   // should not recover the containers that belong to the failed AM.
   @Test(timeout = 20000)
   public void testAMfailedBetweenRMRestart() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
     conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -894,7 +882,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     RMApp app1 = rm1.submitApp(200);
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
 
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
 
@@ -937,9 +925,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   // recover containers for completed apps.
   @Test(timeout = 20000)
   public void testContainersNotRecoveredForCompletedApps() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -948,7 +934,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
     MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);
 
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     NMContainerStatus runningContainer =
@@ -975,11 +961,9 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   @Test (timeout = 600000)
   public void testAppReregisterOnRMWorkPreservingRestart() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
     // start RM
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -993,7 +977,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     am0.registerAppAttempt();
 
     // start new RM
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     rm2.waitForState(app0.getApplicationId(), RMAppState.ACCEPTED);
     rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
@@ -1008,9 +992,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   
   @Test (timeout = 30000)
   public void testAMContainerStatusWithRMRestart() throws Exception {  
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -1025,7 +1007,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
         attempt0.getMasterContainer().getId()).isAMContainer());
 
     // Re-start RM
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
 
@@ -1044,9 +1026,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   @Test (timeout = 20000)
   public void testRecoverSchedulerAppAndAttemptSynchronously() throws Exception {
     // start RM
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1056,7 +1036,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     RMApp app0 = rm1.submitApp(200);
     MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
 
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     // scheduler app/attempt is immediately available after RM is re-started.
@@ -1077,9 +1057,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   // container should not be recovered.
   @Test (timeout = 50000)
   public void testReleasedContainerNotRecovered() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
     nm1.registerNode();
     rm1.start();
@@ -1089,7 +1067,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
 
     // Re-start RM
     conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 8000);
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     rm2.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
@@ -1175,9 +1153,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
       throws Exception {
     conf.setLong(
       YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 4000);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -1186,7 +1162,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
 
     // Restart RM
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     nm1.registerNode();
@@ -1229,11 +1205,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   public void testRetriedFinishApplicationMasterRequest()
       throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
     // start RM
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1253,7 +1226,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
 
 
     // start new RM
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
 
     am0.setAMRMProtocol(rm2.getApplicationMasterService(), rm2.getRMContext());
@@ -1266,9 +1239,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
       "kerberos");
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
     UserGroupInformation.setConfiguration(conf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm1 = new TestSecurityMockRM(conf, memStore);
+    MockRM rm1 = new TestSecurityMockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -1276,7 +1247,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     RMApp app1 = rm1.submitApp(200);
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
 
-    MockRM rm2 = new TestSecurityMockRM(conf, memStore) {
+    MockRM rm2 = new TestSecurityMockRM(conf, rm1.getRMStateStore()) {
       protected DelegationTokenRenewer createDelegationTokenRenewer() {
         return new DelegationTokenRenewer() {
           @Override
@@ -1313,9 +1284,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
    */
   @Test (timeout = 30000)
   public void testAppFailToValidateResourceRequestOnRecovery() throws Exception{
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -1328,16 +1297,14 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 50);
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 100);
 
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     rm2.start();
   }
 
   @Test(timeout = 20000)
   public void testContainerCompleteMsgNotLostAfterAMFailedAndRMRestart() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
 
     MockNM nm1 =
@@ -1370,7 +1337,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     MockAM am1 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
 
     // rm failover
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
 
@@ -1439,11 +1406,9 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
   @Test(timeout = 600000)
   public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
     // start RM
-    rm1 = new MockRM(conf, memStore);
+    rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
@@ -1471,7 +1436,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     Assert.assertFalse(conts.isEmpty());
 
     // start new RM
-    rm2 = new MockRM(conf, memStore);
+    rm2 = new MockRM(conf, rm1.getRMStateStore());
     rm2.start();
     rm2.waitForState(app0.getApplicationId(), RMAppState.ACCEPTED);
     rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
@@ -1521,7 +1486,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
         recoveredApp.getFinalApplicationStatus());
 
     // Restart RM once more to check UAM is not re-run
-    MockRM rm3 = new MockRM(conf, memStore);
+    MockRM rm3 = new MockRM(conf, rm1.getRMStateStore());
     rm3.start();
     recoveredApp = rm3.getRMContext().getRMApps().get(app0.getApplicationId());
     Assert.assertEquals(RMAppState.FINISHED, recoveredApp.getState());
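
The hunks above all apply one refactoring: rather than constructing and
init()-ing a MemoryRMStateStore by hand, each test lets MockRM provision its
own memory-backed store and hands that store to the restarted RM. A minimal
before/after sketch of the pattern (assuming, as these hunks suggest, that
MockRM(conf) now creates the store internally):

    // before: the test owns the store and shares it across restarts
    MemoryRMStateStore memStore = new MemoryRMStateStore();
    memStore.init(conf);
    MockRM rm1 = new MockRM(conf, memStore);
    MockRM rm2 = new MockRM(conf, memStore);

    // after: rm1 owns the store; the restarted rm2 reuses it
    MockRM rm1 = new MockRM(conf);
    MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());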

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index f178884..528afac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
@@ -381,9 +380,7 @@ public class TestAMRestart {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
     conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
     conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -405,7 +402,9 @@ public class TestAMRestart {
     Assert.assertTrue(! attempt1.shouldCountTowardsMaxAttemptRetry());
     rm1.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
     ApplicationStateData appState =
-        memStore.getState().getApplicationState().get(app1.getApplicationId());
+        ((MemoryRMStateStore) rm1.getRMStateStore()).getState()
+            .getApplicationState().get(app1.getApplicationId());
+
     // AM should be restarted even though max-am-attempt is 1.
     MockAM am2 =
         rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(), 2, nm1);
@@ -508,9 +507,7 @@ public class TestAMRestart {
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
     conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
     conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -548,10 +545,9 @@ public class TestAMRestart {
 
     conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -630,10 +626,9 @@ public class TestAMRestart {
     conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     // explicitly set max-am-retry count as 2.
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
 
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     CapacityScheduler scheduler =
         (CapacityScheduler) rm1.getResourceScheduler();
@@ -706,10 +701,8 @@ public class TestAMRestart {
     conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     // explicitly set max-am-retry count as 2.
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
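
Where a test still needs the MemoryRMStateStore-specific API (for example
getState()), it now pulls the store back out of the running RM and casts, as
the hunks above do:

    MemoryRMStateStore memStore =
        (MemoryRMStateStore) rm1.getRMStateStore();
    ApplicationStateData appState = memStore.getState()
        .getApplicationState().get(app1.getApplicationId());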

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
index fdc47b9..f7e76bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
@@ -164,9 +164,8 @@ public class TestApplicationLifetimeMonitor {
         true);
     conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
@@ -235,8 +234,6 @@ public class TestApplicationLifetimeMonitor {
       throws Exception {
     MockRM rm1 = null;
     try {
-      conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-
       MemoryRMStateStore memStore = new MemoryRMStateStore() {
         private int count = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index f1adb5e..60b9e4b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
@@ -382,10 +381,8 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase {
   @Test(timeout = 10000)
   public void testReleasedContainerIfAppAttemptisNull() throws Exception {
     YarnConfiguration conf=getConf();
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm1 = new MockRM(conf, memStore);
+    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    MockRM rm1 = new MockRM(conf);
     try {
       rm1.start();
       MockNM nm1 =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
index fd17bd9..cad0151 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
@@ -26,7 +26,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -42,8 +41,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
@@ -405,16 +402,11 @@ public class TestApplicationPriority {
         YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
     conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    RMState rmState = memStore.getState();
-    Map<ApplicationId, ApplicationStateData> rmAppState = rmState
-        .getApplicationState();
-
     // PHASE 1: create state in an RM
 
     // start RM
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
 
     MockNM nm1 = new MockNM("127.0.0.1:1234", 15120,
@@ -611,10 +603,8 @@ public class TestApplicationPriority {
         YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
     conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
 
     MockNM nm1 =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 41a7ce8..0642cd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -106,7 +106,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMW
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
@@ -2965,9 +2964,7 @@ public class TestCapacityScheduler {
         new YarnConfiguration(new CapacitySchedulerConfiguration());
     conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm = new MockRM(conf, memStore);
+    MockRM rm = new MockRM(conf);
     rm.start();
 
     HashMap<NodeId, MockNM> nodes = new HashMap<>();
@@ -3129,10 +3126,7 @@ public class TestCapacityScheduler {
         new YarnConfiguration(
             setupQueueConfiguration(new CapacitySchedulerConfiguration()));
     conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
-
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 100 * GB, rm1.getResourceTrackerService());
@@ -3212,9 +3206,7 @@ public class TestCapacityScheduler {
     YarnConfiguration conf = new YarnConfiguration(csConf);
     conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm1 = new MockRM(conf, memStore);
+    MockRM rm1 = new MockRM(conf);
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 24 * GB, rm1.getResourceTrackerService());
@@ -3259,9 +3251,7 @@ public class TestCapacityScheduler {
     mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
     mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm = new MockRM(conf, memStore) {
+    MockRM rm = new MockRM(conf) {
       protected RMNodeLabelsManager createNodeLabelManager() {
         return mgr;
       }
@@ -3668,9 +3658,8 @@ public class TestCapacityScheduler {
     final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
     mgr.init(conf);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    MockRM rm = new MockRM(conf, memStore) {
+
+    MockRM rm = new MockRM(conf) {
       protected RMNodeLabelsManager createNodeLabelManager() {
         return mgr;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java
index 0386aab..36ee68e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java
@@ -133,21 +133,17 @@ public class TestWorkPreservingRMRestartForNodeLabel {
     mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
         NodeId.newInstance("h2", 0), toSet("y")));
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-    
     conf = TestUtils.getConfigurationWithDefaultQueueLabels(conf);
-    
+
     // inject node label manager
     MockRM rm1 =
-        new MockRM(conf,
-            memStore) {
+        new MockRM(conf) {
           @Override
           public RMNodeLabelsManager createNodeLabelManager() {
             return mgr;
           }
         };
-
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.getRMContext().setNodeLabelManager(mgr);
     rm1.start();
     MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 2233287..0d54c33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5112,12 +5112,10 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     out.println("</allocations>");
     out.close();
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(conf);
-
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     // 3. start an active RM
-    MockRM rm2 = new MockRM(conf, memStore);
-    rm2.init(conf);
+    MockRM rm2 = new MockRM(conf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm2.getRMStateStore();
     rm2.start();
 
     MockNM nm =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 0190db6..9fb9d42 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -1006,9 +1006,8 @@ public class TestDelegationTokenRenewer {
     Credentials credentials = new Credentials();
     credentials.addToken(userText1, originalToken);
 
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
-    memStore.init(yarnConf);
-    MockRM rm1 = new TestSecurityMockRM(yarnConf, memStore);
+    MockRM rm1 = new TestSecurityMockRM(yarnConf);
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     RMApp app = rm1.submitApp(200, "name", "user",
         new HashMap<ApplicationAccessType, String>(), false, "default", 1,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66fd11e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index 80310a5..06c642a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRMMemoryStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMSecretManagerService;
 import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.TestSecurityMockRM;
@@ -61,10 +62,10 @@ public class TestRMDelegationTokens {
     rootLogger.setLevel(Level.DEBUG);
     ExitUtil.disableSystemExit();
     testConf = new YarnConfiguration();
+    testConf
+        .set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     UserGroupInformation.setLoginUser(null);
     UserGroupInformation.setConfiguration(testConf);
-    testConf.set(YarnConfiguration.RM_STORE,
-        MemoryRMStateStore.class.getName());
   }
 
  // Test the DT master key in the state-store when the master key is being rolled.
@@ -73,7 +74,7 @@ public class TestRMDelegationTokens {
     Configuration conf = new Configuration(testConf);
     conf.set("hadoop.security.authentication", "kerberos");
     UserGroupInformation.setConfiguration(conf);
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore();
     memStore.init(conf);
     RMState rmState = memStore.getState();
 
@@ -127,7 +128,7 @@ public class TestRMDelegationTokens {
   // Test all expired keys are removed from state-store.
   @Test(timeout = 15000)
   public void testRemoveExpiredMasterKeyInRMStateStore() throws Exception {
-    MemoryRMStateStore memStore = new MemoryRMStateStore();
+    MemoryRMStateStore memStore = new MockRMMemoryStateStore();
     memStore.init(testConf);
     RMState rmState = memStore.getState();
 




[41/50] [abbrv] hadoop git commit: HADOOP-14690. RetryInvocationHandler should override toString(). Contributed by Yeliang Cang.

Posted by xg...@apache.org.
HADOOP-14690. RetryInvocationHandler should override toString(). Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f14be0d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f14be0d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f14be0d2

Branch: refs/heads/YARN-5734
Commit: f14be0d24126747887ddc7580f4a9a70768de23d
Parents: f8bed5e
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jul 31 14:08:30 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jul 31 14:08:30 2017 +0900

----------------------------------------------------------------------
 .../apache/hadoop/io/retry/RetryInvocationHandler.java   | 11 +++++++++++
 1 file changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f14be0d2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index ffdd928..9f01c39 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -295,6 +295,17 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
 
       return new RetryInfo(maxRetryDelay, max, expectedFailoverCount, ex);
     }
+
+    @Override
+    public String toString() {
+      return "RetryInfo{" +
+              "retryTime=" + retryTime +
+              ", delay=" + delay +
+              ", action=" + action +
+              ", expectedFailoverCount=" + expectedFailoverCount +
+              ", failException=" + failException +
+              '}';
+    }
   }
 
   private final ProxyDescriptor<T> proxyDescriptor;
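
With this override in place, logging a RetryInfo no longer prints the default
Object hash; a call such as the following sketch (field values illustrative
only) yields a readable summary:

    LOG.info("retry decision: " + retryInfo);
    // -> retry decision: RetryInfo{retryTime=1501476510000, delay=2000,
    //    action=FAILOVER_AND_RETRY, expectedFailoverCount=1, failException=null}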




[24/50] [abbrv] hadoop git commit: HADOOP-11875. [JDK9] Adding a second copy of Hamlet without _ as a one-character identifier.

Posted by xg...@apache.org.
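
Background: Java 9 rejects a lone underscore as an identifier, and the
original Hamlet API leaned on exactly that (the _() call that closes an
element). The hamlet2 copy added here doubles the character, so builder
chains close with __() instead; a hypothetical snippet in the new style:

    // old Hamlet:  html.div("#content").h1("Cluster Metrics")._();
    // hamlet2:
    html.div("#content").h1("Cluster Metrics").__();
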
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java
new file mode 100644
index 0000000..8aeba93
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java
@@ -0,0 +1,3101 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.webapp.hamlet2;
+
+import java.lang.annotation.*;
+import java.util.EnumSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.yarn.webapp.SubView;
+
+/**
+ * HTML5 compatible HTML4 builder interfaces.
+ *
+ * <p>Generated from HTML 4.01 strict DTD and HTML5 diffs.
+ * <br>cf. http://www.w3.org/TR/html4/
+ * <br>cf. http://www.w3.org/TR/html5-diff/
+ * <p> The omitted attributes and elements (from the 4.01 DTD)
+ * are for HTML5 compatibility.
+ *
+ * <p>Note, the common argument selector uses the same syntax as Haml/Sass:
+ * <pre>  selector ::= (#id)?(.class)*</pre>
+ * cf. http://haml-lang.com/
+ *
+ * <p>The naming convention used in this class is slightly different from
+ * normal classes. A CamelCase interface corresponds to an entity in the DTD.
+ * _CamelCase is for internal refactoring. An element builder interface is in
+ * UPPERCASE, corresponding to an element definition in the DTD. $lowercase is
+ * used as attribute builder methods to differentiate from element builder
+ * methods.
+ */
+@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
+public class HamletSpec {
+  // The enum values are lowercase for better compression,
+  // while avoiding runtime conversion.
+  // cf. http://www.w3.org/Protocols/HTTP/Performance/Compression/HTMLCanon.html
+  //     http://www.websiteoptimization.com/speed/tweak/lowercase/
+  /** %Shape (case-insensitive) */
+  public enum Shape {
+    /**
+     * rectangle
+     */
+    rect,
+    /**
+     * circle
+     */
+    circle,
+    /**
+     * polygon
+     */
+    poly,
+    /**
+     * default
+     */
+    Default
+  };
+
+  /** Values for the %18n dir attribute (case-insensitive) */
+  public enum Dir {
+    /**
+     * left to right
+     */
+    ltr,
+    /**
+     * right to left
+     */
+    rtl
+  };
+
+  /** %MediaDesc (case-sensitive) */
+  public enum Media {
+    /**
+     * computer screen
+     */
+    screen,
+    /**
+     * teletype/terminal
+     */
+    tty,
+    /**
+     * television
+     */
+    tv,
+    /**
+     * projection
+     */
+    projection,
+    /**
+     * mobile device
+     */
+    handheld,
+    /**
+     * print media
+     */
+    print,
+    /**
+     * braille
+     */
+    braille,
+    /**
+     * aural
+     */
+    aural,
+    /**
+     * suitable all media
+     */
+    all
+  };
+
+  /** %LinkTypes (case-insensitive) */
+  public enum LinkType {
+    /**
+     *
+     */
+    alternate,
+    /**
+     *
+     */
+    stylesheet,
+    /**
+     *
+     */
+    start,
+    /**
+     *
+     */
+    next,
+    /**
+     *
+     */
+    prev,
+    /**
+     *
+     */
+    contents,
+    /**
+     *
+     */
+    index,
+    /**
+     *
+     */
+    glossary,
+    /**
+     *
+     */
+    copyright,
+    /**
+     *
+     */
+    chapter,
+    /**
+     *
+     */
+    section,
+    /**
+     *
+     */
+    subsection,
+    /**
+     *
+     */
+    appendix,
+    /**
+     *
+     */
+    help,
+    /**
+     *
+     */
+    bookmark
+  };
+
+  /** Values for form methods (case-insensitive) */
+  public enum Method {
+    /**
+     * HTTP GET
+     */
+    get,
+    /**
+     * HTTP POST
+     */
+    post
+  };
+
+  /** %InputType (case-insensitive) */
+  public enum InputType {
+    /**
+     *
+     */
+    text,
+    /**
+     *
+     */
+    password,
+    /**
+     *
+     */
+    checkbox,
+    /**
+     *
+     */
+    radio,
+    /**
+     *
+     */
+    submit,
+    /**
+     *
+     */
+    reset,
+    /**
+     *
+     */
+    file,
+    /**
+     *
+     */
+    hidden,
+    /**
+     *
+     */
+    image,
+    /**
+     *
+     */
+    button
+  };
+
+  /** Values for button types */
+  public enum ButtonType {
+    /**
+     *
+     */
+    button,
+    /**
+     *
+     */
+    submit,
+    /**
+     *
+     */
+    reset
+  };
+
+  /** %Scope (case-insensitive) */
+  public enum Scope {
+    /**
+     *
+     */
+    row,
+    /**
+     *
+     */
+    col,
+    /**
+     *
+     */
+    rowgroup,
+    /**
+     *
+     */
+    colgroup
+  };
+
+  /**
+   * The element annotation for specifying element options other than
+   * attributes and allowed child elements
+   */
+  @Target({ElementType.TYPE})
+  @Retention(RetentionPolicy.RUNTIME)
+  public @interface Element {
+    /**
+     * Whether the start tag is required for the element.
+     * @return true if start tag is required
+     */
+    boolean startTag() default true;
+
+    /**
+     * Whether the end tag is required.
+     * @return true if end tag is required
+     */
+    boolean endTag() default true;
+  }
+
+  /**
+   *
+   */
+  public interface __ {}
+
+  /**
+   *
+   */
+  public interface _Child extends __ {
+    /**
+     * Finish the current element.
+     * @return the parent element
+     */
+    __ __();
+  }
+
+  /**
+   *
+   */
+  public interface _Script {
+    /**
+     * Add a script element.
+     * @return a script element builder
+     */
+    SCRIPT script();
+
+    /**
+     * Add a script element
+     * @param src uri of the script
+     * @return the current element builder
+     */
+    _Script script(String src);
+  }
+
+  /**
+   *
+   */
+  public interface _Object {
+      /**
+     * Add an object element.
+     * @return an object element builder
+     */
+    OBJECT object();
+
+    /**
+     * Add an object element.
+     * @param selector as #id.class etc.
+     * @return an object element builder
+     */
+    OBJECT object(String selector);
+  }
+
+  /** %head.misc */
+  public interface HeadMisc extends _Script, _Object {
+    /**
+     * Add a style element.
+     * @return a style element builder
+     */
+    STYLE style();
+
+    /**
+     * Add a css style element.
+     * @param lines content of the style sheet
+     * @return the current element builder
+     */
+    HeadMisc style(Object... lines);
+
+    /**
+     * Add a meta element.
+     * @return a meta element builder
+     */
+    META meta();
+
+    /**
+     * Add a meta element.
+     * Shortcut of <code>meta().$name(name).$content(content).__();</code>
+     * @param name of the meta element
+     * @param content of the meta element
+     * @return the current element builder
+     */
+    HeadMisc meta(String name, String content);
+
+    /**
+     * Add a meta element with http-equiv attribute.
+     * Shortcut of <br>
+     * <code>meta().$http_equiv(header).$content(content).__();</code>
+     * @param header for the http-equiv attribute
+     * @param content of the header
+     * @return the current element builder
+     */
+    HeadMisc meta_http(String header, String content);
+
+    /**
+     * Add a link element.
+     * @return a link element builder
+     */
+    LINK link();
+
+    /**
+     * Add a link element.
+     * Implementation should try to figure out type by the suffix of href.
+     * So <code>link("style.css");</code> is a shortcut of
+     * <code>link().$rel("stylesheet").$type("text/css").$href("style.css").__();
+     * </code>
+     * @param href of the link
+     * @return the current element builder
+     */
+    HeadMisc link(String href);
+  }
+
+  /** %heading */
+  public interface Heading {
+    /**
+     * Add an H1 element.
+     * @return a new H1 element builder
+     */
+    H1 h1();
+
+    /**
+     * Add a complete H1 element.
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h1(String cdata);
+
+    /**
+     * Add a complete H1 element
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h1(String selector, String cdata);
+
+    /**
+     * Add an H2 element.
+     * @return a new H2 element builder
+     */
+    H2 h2();
+
+    /**
+     * Add a complete H2 element.
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h2(String cdata);
+
+    /**
+     * Add a complete H1 element
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h2(String selector, String cdata);
+
+    /**
+     * Add an H3 element.
+     * @return a new H3 element builder
+     */
+    H3 h3();
+
+    /**
+     * Add a complete H3 element.
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h3(String cdata);
+
+    /**
+     * Add a complete H1 element
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h3(String selector, String cdata);
+
+    /**
+     * Add an H4 element.
+     * @return a new H4 element builder
+     */
+    H4 h4();
+
+    /**
+     * Add a complete H4 element.
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h4(String cdata);
+
+    /**
+     * Add a complete H4 element
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h4(String selector, String cdata);
+
+    /**
+     * Add an H5 element.
+     * @return a new H5 element builder
+     */
+    H5 h5();
+
+    /**
+     * Add a complete H5 element.
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h5(String cdata);
+
+    /**
+     * Add a complete H5 element
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h5(String selector, String cdata);
+
+    /**
+     * Add an H6 element.
+     * @return a new H6 element builder
+     */
+    H6 h6();
+
+    /**
+     * Add a complete H6 element.
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h6(String cdata);
+
+    /**
+     * Add a complete H6 element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    Heading h6(String selector, String cdata);
+  }
+
+  /** %list */
+  public interface Listing {
+
+    /**
+     * Add a UL (unordered list) element.
+     * @return a new UL element builder
+     */
+    UL ul();
+
+    /**
+     * Add a UL (unordered list) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new UL element builder
+     */
+    UL ul(String selector);
+
+    /**
+     * Add a OL (ordered list) element.
+     * @return a new UL element builder
+     */
+    OL ol();
+
+    /**
+     * Add a OL (ordered list) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new UL element builder
+     */
+    OL ol(String selector);
+  }
+
+  /** % preformatted */
+  public interface Preformatted {
+
+    /**
+     * Add a PRE (preformatted) element.
+     * @return a new PRE element builder
+     */
+    PRE pre();
+
+    /**
+     * Add a PRE (preformatted) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new PRE element builder
+     */
+    PRE pre(String selector);
+  }
+
+  /** %coreattrs */
+  public interface CoreAttrs {
+    /** document-wide unique id
+     * @param id the id
+     * @return the current element builder
+     */
+    CoreAttrs $id(String id);
+
+    /** space-separated list of classes
+     * @param cls the classes
+     * @return the current element builder
+     */
+    CoreAttrs $class(String cls);
+
+    /** associated style info
+     * @param style the style
+     * @return the current element builder
+     */
+    CoreAttrs $style(String style);
+
+    /** advisory title
+     * @param title the title
+     * @return the current element builder
+     */
+    CoreAttrs $title(String title);
+  }
+
+  /** %i18n */
+  public interface I18nAttrs {
+    /** language code
+     * @param lang the code
+     * @return the current element builder
+     */
+    I18nAttrs $lang(String lang);
+
+    /** direction for weak/neutral text
+     * @param dir the {@link Dir} value
+     * @return the current element builder
+     */
+    I18nAttrs $dir(Dir dir);
+  }
+
+  /** %events */
+  public interface EventsAttrs {
+
+    /** a pointer button was clicked
+     * @param onclick the script
+     * @return the current element builder
+     */
+    EventsAttrs $onclick(String onclick);
+
+    /** a pointer button was double clicked
+     * @param ondblclick the script
+     * @return the current element builder
+     */
+    EventsAttrs $ondblclick(String ondblclick);
+
+    /** a pointer button was pressed down
+     * @param onmousedown the script
+     * @return the current element builder
+     */
+    EventsAttrs $onmousedown(String onmousedown);
+
+    /** a pointer button was released
+     * @param onmouseup the script
+     * @return the current element builder
+     */
+    EventsAttrs $onmouseup(String onmouseup);
+
+    /** a pointer was moved onto
+     * @param onmouseover the script
+     * @return the current element builder
+     */
+    EventsAttrs $onmouseover(String onmouseover);
+
+    /** a pointer was moved within
+     * @param onmousemove the script
+     * @return the current element builder
+     */
+    EventsAttrs $onmousemove(String onmousemove);
+
+    /** a pointer was moved away
+     * @param onmouseout the script
+     * @return the current element builder
+     */
+    EventsAttrs $onmouseout(String onmouseout);
+
+    /** a key was pressed and released
+     * @param onkeypress the script
+     * @return the current element builder
+     */
+    EventsAttrs $onkeypress(String onkeypress);
+
+    /** a key was pressed down
+     * @param onkeydown the script
+     * @return the current element builder
+     */
+    EventsAttrs $onkeydown(String onkeydown);
+
+    /** a key was released
+     * @param onkeyup the script
+     * @return the current element builder
+     */
+    EventsAttrs $onkeyup(String onkeyup);
+  }
+
+  /** %attrs */
+  public interface Attrs extends CoreAttrs, I18nAttrs, EventsAttrs {
+  }
+
+  /** Part of %pre.exclusion */
+  public interface _FontSize extends _Child {
+    // BIG omitted cf. http://www.w3.org/TR/html5-diff/
+
+    /**
+     * Add a SMALL (small print) element
+     * @return a new SMALL element builder
+     */
+    SMALL small();
+
+    /**
+     * Add a complete small (small print) element.
+     * Shortcut of: small().__(cdata).__();
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    _FontSize small(String cdata);
+
+    /**
+     * Add a complete small (small print) element.
+     * Shortcut of: small().$id(id).$class(class).__(cdata).__();
+     * @param selector css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    _FontSize small(String selector, String cdata);
+  }
+
+  /** %fontstyle -(%pre.exclusion) */
+  public interface _FontStyle extends _Child {
+    // TT omitted
+
+    /**
+     * Add an I (italic, alt voice/mood) element.
+     * @return the new I element builder
+     */
+    I i();
+
+    /**
+     * Add a complete I (italic, alt voice/mood) element.
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    _FontStyle i(String cdata);
+
+    /**
+     * Add a complete I (italic, alt voice/mood) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content of the element
+     * @return the current element builder
+     */
+    _FontStyle i(String selector, String cdata);
+
+    /**
+     * Add a new B (bold/important) element.
+     * @return a new B element builder
+     */
+    B b();
+
+    /**
+     * Add a complete B (bold/important) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _FontStyle b(String cdata);
+
+    /**
+     * Add a complete B (bold/important) element.
+     * @param selector the css select (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+     _FontStyle b(String selector, String cdata);
+  }
+
+  /** %fontstyle */
+  public interface FontStyle extends _FontStyle, _FontSize {
+  }
+
+  /** %phrase */
+  public interface Phrase extends _Child {
+
+    /**
+     * Add an EM (emphasized) element.
+     * @return a new EM element builder
+     */
+    EM em();
+
+    /**
+     * Add an EM (emphasized) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase em(String cdata);
+
+    /**
+     * Add an EM (emphasized) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase em(String selector, String cdata);
+
+    /**
+     * Add a STRONG (important) element.
+     * @return a new STRONG element builder
+     */
+    STRONG strong();
+
+    /**
+     * Add a complete STRONG (important) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase strong(String cdata);
+
+    /**
+     * Add a complete STRONG (important) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase strong(String selector, String cdata);
+
+    /**
+     * Add a DFN element.
+     * @return a new DFN element builder
+     */
+    DFN dfn();
+
+    /**
+     * Add a complete DFN element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase dfn(String cdata);
+
+    /**
+     * Add a complete DFN element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase dfn(String selector, String cdata);
+
+    /**
+     * Add a CODE (code fragment) element.
+     * @return a new CODE element builder
+     */
+    CODE code();
+
+    /**
+     * Add a complete CODE element.
+     * @param cdata the code
+     * @return the current element builder
+     */
+    Phrase code(String cdata);
+
+    /**
+     * Add a complete CODE element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the code
+     * @return the current element builder
+     */
+    Phrase code(String selector, String cdata);
+
+    /**
+     * Add a SAMP (sample) element.
+     * @return a new SAMP element builder
+     */
+    SAMP samp();
+
+    /**
+     * Add a complete SAMP (sample) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase samp(String cdata);
+
+    /**
+     * Add a complete SAMP (sample) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase samp(String selector, String cdata);
+
+    /**
+     * Add a KBD (keyboard) element.
+     * @return a new KBD element builder
+     */
+    KBD kbd();
+
+    /**
+     * Add a KBD (keyboard) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase kbd(String cdata);
+
+    /**
+     * Add a KBD (keyboard) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase kbd(String selector, String cdata);
+
+    /**
+     * Add a VAR (variable) element.
+     * @return a new VAR element builder
+     */
+    VAR var();
+
+    /**
+     * Add a VAR (variable) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase var(String cdata);
+
+    /**
+     * Add a VAR (variable) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase var(String selector, String cdata);
+
+    /**
+     * Add a CITE element.
+     * @return a new CITE element builder
+     */
+    CITE cite();
+
+    /**
+     * Add a CITE element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase cite(String cdata);
+
+    /**
+     * Add a CITE element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase cite(String selector, String cdata);
+
+    /**
+     * Add an ABBR (abbreviation) element.
+     * @return a new ABBR element builder
+     */
+    ABBR abbr();
+
+    /**
+     * Add a complete ABBR (abbreviation) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase abbr(String cdata);
+
+    /**
+     * Add a complete ABBR (abbreviation) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    Phrase abbr(String selector, String cdata);
+
+    // ACRONYM omitted, use ABBR
+  }
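+
+  // Non-normative sketch of the %phrase shortcuts above, assuming a
+  // generated builder "html"; the text is illustrative:
+  //
+  //   html.p()
+  //         .__("Run ")               // escaped text content
+  //         .code("yarn node -list")  // complete CODE shortcut
+  //         .__(" to list nodes.")
+  //       .__();                      // close the P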
+
+  /** Part of %pre.exclusion */
+  public interface _ImgObject extends _Object, _Child {
+
+    /**
+     * Add an IMG (image) element.
+     * @return a new IMG element builder
+     */
+    IMG img();
+
+    /**
+     * Add an IMG (image) element.
+     * @param src the source URL of the image
+     * @return the current element builder
+     */
+    _ImgObject img(String src);
+  }
+
+  /** Part of %pre.exclusion */
+  public interface _SubSup extends _Child {
+
+    /**
+     * Add a SUB (subscript) element.
+     * @return a new SUB element builder
+     */
+    SUB sub();
+
+    /**
+     * Add a complete SUB (subscript) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _SubSup sub(String cdata);
+
+    /**
+     * Add a complete SUB (subscript) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _SubSup sub(String selector, String cdata);
+
+    /**
+     * Add a SUP (superscript) element.
+     * @return a new SUP element builder
+     */
+    SUP sup();
+
+    /**
+     * Add a SUP (superscript) element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _SubSup sup(String cdata);
+
+    /**
+     * Add a SUP (superscript) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _SubSup sup(String selector, String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface _Anchor {
+
+    /**
+     * Add an A (anchor) element.
+     * @return a new A element builder
+     */
+    A a();
+
+    /**
+     * Add an A (anchor) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new A element builder
+     */
+    A a(String selector);
+
+    /** Shortcut for <code>a().$href(href).__(anchorText).__();</code>
+     * @param href the URI
+     * @param anchorText for the URI
+     * @return the current element builder
+     */
+    _Anchor a(String href, String anchorText);
+
+    /** Shortcut for <code>a(selector).$href(href).__(anchorText).__();</code>
+     * @param selector in the form of (#id)?(.class)*
+     * @param href the URI
+     * @param anchorText for the URI
+     * @return the current element builder
+     */
+    _Anchor a(String selector, String href, String anchorText);
+  }
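+
+  // Non-normative sketch of the anchor shortcuts above; the URL and text
+  // are illustrative, "html" is an assumed generated builder:
+  //
+  //   html.a("http://example.com/app", "application page");
+  //   // same as: a().$href("http://example.com/app")
+  //   //            .__("application page").__();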
+
+  /**
+   * INS and DEL are unusual for HTML
+   * "in that they may serve as either block-level or inline elements
+   * (but not both)".
+   * <br>cf. http://www.w3.org/TR/html4/struct/text.html#h-9.4
+   * <br>cf. http://www.w3.org/TR/html5/edits.html#edits
+   */
+  public interface _InsDel {
+
+    /**
+     * Add an INS (insert) element.
+     * @return an INS element builder
+     */
+    INS ins();
+
+    /**
+     * Add a complete INS element.
+     * @param cdata inserted data
+     * @return the current element builder
+     */
+    _InsDel ins(String cdata);
+
+    /**
+     * Add a DEL (delete) element.
+     * @return a DEL element builder
+     */
+    DEL del();
+
+    /**
+     * Add a complete DEL element.
+     * @param cdata deleted data
+     * @return the current element builder
+     */
+    _InsDel del(String cdata);
+  }
+
+  /** %special -(A|%pre.exclusion) */
+  public interface _Special extends _Script, _InsDel {
+
+    /**
+     * Add a BR (line break) element.
+     * @return a new BR element builder
+     */
+    BR br();
+
+    /**
+     * Add a BR (line break) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return the current element builder
+     */
+    _Special br(String selector);
+
+    /**
+     * Add a MAP element.
+     * @return a new MAP element builder
+     */
+    MAP map();
+
+    /**
+     * Add a MAP element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new MAP element builder
+     */
+    MAP map(String selector);
+
+    /**
+     * Add a Q (inline quotation) element.
+     * @return a new Q (inline quotation) element builder
+     */
+    Q q();
+
+    /**
+     * Add a complete Q element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Special q(String cdata);
+
+    /**
+     * Add a Q element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Special q(String selector, String cdata);
+
+    /**
+     * Add a SPAN element.
+     * @return a new SPAN element builder
+     */
+    SPAN span();
+
+    /**
+     * Add a SPAN element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Special span(String cdata);
+
+    /**
+     * Add a SPAN element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Special span(String selector, String cdata);
+
+    /**
+     * Add a BDO (bidirectional override) element.
+     * @return a new BDO element builder
+     */
+    BDO bdo();
+
+    /**
+     * Add a complete BDO (bidirectional override) element.
+     * @param dir the direction of the text
+     * @param cdata the text
+     * @return the current element builder
+     */
+    _Special bdo(Dir dir, String cdata);
+  }
+
+  /** %special */
+  public interface Special extends _Anchor, _ImgObject, _SubSup, _Special {
+  }
+
+  /**
+   *
+   */
+  public interface _Label extends _Child {
+
+    /**
+     * Add a LABEL element.
+     * @return a new LABEL element builder
+     */
+    LABEL label();
+
+    /**
+     * Add a LABEL element.
+     * Shortcut of <code>label().$for(forId).__(cdata).__();</code>
+     * @param forId the for attribute
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Label label(String forId, String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface _FormCtrl {
+
+    /**
+     * Add an INPUT element.
+     * @return a new INPUT element builder
+     */
+    INPUT input();
+
+    /**
+     * Add an INPUT element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new INPUT element builder
+     */
+    INPUT input(String selector);
+
+    /**
+     * Add a SELECT element.
+     * @return a new SELECT element builder
+     */
+    SELECT select();
+
+    /**
+     * Add a SELECT element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new SELECT element builder
+     */
+    SELECT select(String selector);
+
+    /**
+     * Add a TEXTAREA element.
+     * @return a new TEXTAREA element builder
+     */
+    TEXTAREA textarea();
+
+    /**
+     * Add a TEXTAREA element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new TEXTAREA element builder
+     */
+    TEXTAREA textarea(String selector);
+
+    /**
+     * Add a complete TEXTAREA element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _FormCtrl textarea(String selector, String cdata);
+
+    /**
+     * Add a BUTTON element.
+     * @return a new BUTTON element builder
+     */
+    BUTTON button();
+
+    /**
+     * Add a BUTTON element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new BUTTON element builder
+     */
+    BUTTON button(String selector);
+
+    /**
+     * Add a complete BUTTON element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _FormCtrl button(String selector, String cdata);
+  }
+
+  /** %formctrl */
+  public interface FormCtrl extends _Label, _FormCtrl {
+  }
+
+  /**
+   *
+   */
+  public interface _Content extends _Child {
+    /**
+     * Content of the element
+     * @param lines of content
+     * @return the current element builder
+     */
+    _Content __(Object... lines);
+  }
+
+  /**
+   *
+   */
+  public interface _RawContent extends _Child {
+    /**
+     * Raw content, emitted without HTML escaping
+     * @param lines of content
+     * @return the current element builder
+     */
+    _RawContent _r(Object... lines);
+  }
+
+  /** #PCDATA */
+  public interface PCData extends _Content, _RawContent {
+  }
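+
+  // Non-normative sketch contrasting escaped and raw content, assuming a
+  // generated builder "html":
+  //
+  //   html.p().__("5 < 6").__();     // emitted as "5 &lt; 6" (escaped)
+  //   html.p()._r("5 &lt; 6").__();  // emitted verbatim; caller escapes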
+
+  /** %inline */
+  public interface Inline extends PCData, FontStyle, Phrase, Special, FormCtrl {
+  }
+
+  /**
+   *
+   */
+  public interface I extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface B extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface SMALL extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface EM extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface STRONG extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface DFN extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface CODE extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface SAMP extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface KBD extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface VAR extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface CITE extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface ABBR extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface ACRONYM extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface SUB extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface SUP extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface SPAN extends Attrs, Inline, _Child {
+  }
+
+  /** The dir attribute is required for the BDO element */
+  public interface BDO extends CoreAttrs, I18nAttrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface BR extends CoreAttrs, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface _Form {
+
+    /**
+     * Add a FORM element.
+     * @return a new FORM element builder
+     */
+    FORM form();
+
+    /**
+     * Add a FORM element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new FORM element builder
+     */
+    FORM form(String selector);
+  }
+
+  /**
+   *
+   */
+  public interface _FieldSet {
+
+    /**
+     * Add a FIELDSET element.
+     * @return a new FIELDSET element builder
+     */
+    FIELDSET fieldset();
+
+    /**
+     * Add a FIELDSET element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new FIELDSET element builder
+     */
+    FIELDSET fieldset(String selector);
+  }
+
+  /** %block -(FORM|FIELDSET) */
+  public interface _Block extends Heading, Listing, Preformatted {
+
+    /**
+     * Add a P (paragraph) element.
+     * @return a new P element builder
+     */
+    P p();
+
+    /**
+     * Add a P (paragraph) element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new P element builder
+     */
+    P p(String selector);
+
+    /**
+     * Add a DL (description list) element.
+     * @return a new DL element builder
+     */
+    DL dl();
+
+    /**
+     * Add a DL element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new DL element builder
+     */
+    DL dl(String selector);
+
+    /**
+     * Add a DIV element.
+     * @return a new DIV element builder
+     */
+    DIV div();
+
+    /**
+     * Add a DIV element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new DIV element builder
+     */
+    DIV div(String selector);
+
+    // NOSCRIPT omitted
+    // cf. http://www.w3.org/html/wg/tracker/issues/117
+
+    /**
+     * Add a BLOCKQUOTE element.
+     * @return a new BLOCKQUOTE element builder
+     */
+    BLOCKQUOTE blockquote();
+
+    /**
+     * Alias of blockquote
+     * @return a new BLOCKQUOTE element builder
+     */
+    BLOCKQUOTE bq();
+
+    /**
+     * Add an HR (horizontal rule) element.
+     * @return a new HR element builder
+     */
+    HR hr();
+
+    /**
+     * Add an HR element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new HR element builder
+     */
+    _Block hr(String selector);
+
+    /**
+     * Add a TABLE element.
+     * @return a new TABLE element builder
+     */
+    TABLE table();
+
+    /**
+     * Add a TABLE element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new TABLE element builder
+     */
+    TABLE table(String selector);
+
+    /**
+     * Add an ADDRESS element.
+     * @return a new ADDRESS element builder
+     */
+    ADDRESS address();
+
+    /**
+     * Add a complete ADDRESS element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Block address(String cdata);
+
+    /**
+     * Embed a sub-view.
+     * @param cls the sub-view class
+     * @return the current element builder
+     */
+    _Block __(Class<? extends SubView> cls);
+  }
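+
+  // Non-normative sketch of the %block builders, assuming a generated
+  // builder "html" and an illustrative sub-view class MyBlock:
+  //
+  //   html.div("#content")
+  //         .h2("Status")                          // complete heading
+  //         .p().__("All services running.").__()  // paragraph
+  //         .__(MyBlock.class)                     // embed a sub-view
+  //       .__();                                   // close the DIV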
+
+  /** %block */
+  public interface Block extends _Block, _Form, _FieldSet {
+  }
+
+  /** %flow */
+  public interface Flow extends Block, Inline {
+  }
+
+  /**
+   *
+   */
+  public interface _Body extends Block, _Script, _InsDel {
+  }
+
+  /**
+   *
+   */
+  public interface BODY extends Attrs, _Body, _Child {
+
+    /**
+     * The document has been loaded.
+     * @param script to invoke
+     * @return the current element builder
+     */
+    BODY $onload(String script);
+
+    /**
+     * The document has been removed.
+     * @param script to invoke
+     * @return the current element builder
+     */
+    BODY $onunload(String script);
+  }
+
+  /**
+   *
+   */
+  public interface ADDRESS extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface DIV extends Attrs, Flow, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface A extends Attrs, _Child, /* %inline -(A) */
+                             PCData, FontStyle, Phrase, _ImgObject, _Special,
+                             _SubSup, FormCtrl {
+    // $charset omitted.
+
+    /** advisory content type
+     * @param cdata the content-type
+     * @return the current element builder
+     */
+    A $type(String cdata);
+
+    // $name omitted. use id instead.
+    /** URI for linked resource
+     * @param uri the URI
+     * @return the current element builder
+     */
+    A $href(String uri);
+
+    /** language code
+     * @param cdata the code
+     * @return the current element builder
+     */
+    A $hreflang(String cdata);
+
+    /** forward link types
+     * @param linkTypes the types
+     * @return the current element builder
+     */
+    A $rel(EnumSet<LinkType> linkTypes);
+
+    /**
+     * forward link types
+     * @param linkTypes space-separated list of link types
+     * @return the current element builder.
+     */
+    A $rel(String linkTypes);
+
+    // $rev omitted. Instead of rev="made", use rel="author"
+
+    /** accessibility key character
+     * @param cdata the key
+     * @return the current element builder
+     */
+    A $accesskey(String cdata);
+
+    // $shape and coords omitted. use area instead of a for image maps.
+    /** position in tabbing order
+     * @param index the index
+     * @return the current element builder
+     */
+    A $tabindex(int index);
+
+    /** the element got the focus
+     * @param script to invoke
+     * @return the current element builder
+     */
+    A $onfocus(String script);
+
+    /** the element lost the focus
+     * @param script to invoke
+     * @return the current element builder
+     */
+    A $onblur(String script);
+  }
+
+  /**
+   *
+   */
+  public interface MAP extends Attrs, Block, _Child {
+
+    /**
+     * Add an AREA element.
+     * @return a new AREA element builder
+     */
+    AREA area();
+
+    /**
+     * Add an AREA element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new AREA element builder
+     */
+    AREA area(String selector);
+
+    /** for reference by usemap
+     * @param name of the map
+     * @return the current element builder
+     */
+    MAP $name(String name);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface AREA extends Attrs, _Child {
+
+    /** controls interpretation of coords
+     * @param shape of the area
+     * @return the current element builder
+     */
+    AREA $shape(Shape shape);
+
+    /** comma-separated list of lengths
+     * @param cdata coords of the area
+     * @return the current element builder
+     */
+    AREA $coords(String cdata);
+
+    /** URI for linked resource
+     * @param uri the URI
+     * @return the current element builder
+     */
+    AREA $href(String uri);
+
+    // $nohref omitted.
+    /** short description
+     * @param desc the description
+     * @return the current element builder
+     */
+    AREA $alt(String desc);
+
+    /** position in tabbing order
+     * @param index of the order
+     * @return the current element builder
+     */
+    AREA $tabindex(int index);
+
+    /** accessibility key character
+     * @param cdata the key
+     * @return the current element builder
+     */
+    AREA $accesskey(String cdata);
+
+    /** the element got the focus
+     * @param script to invoke
+     * @return the current element builder
+     */
+    AREA $onfocus(String script);
+
+    /** the element lost the focus
+     * @param script to invoke
+     * @return the current element builder
+     */
+    AREA $onblur(String script);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface LINK extends Attrs, _Child {
+    // $charset omitted
+    /** URI for linked resource
+     * @param uri the URI
+     * @return the current element builder
+     */
+    LINK $href(String uri);
+
+    /** language code
+     * @param cdata the code
+     * @return the current element builder
+     */
+    LINK $hreflang(String cdata);
+
+    /** advisory content type
+     * @param cdata the type
+     * @return the current element builder
+     */
+    LINK $type(String cdata);
+
+    /** forward link types
+     * @param linkTypes the types
+     * @return the current element builder
+     */
+    LINK $rel(EnumSet<LinkType> linkTypes);
+
+    /**
+     * forward link types.
+     * @param linkTypes space-separated link types
+     * @return the current element builder
+     */
+    LINK $rel(String linkTypes);
+
+    // $rev omitted. Instead of rev="made", use rel="author"
+
+    /** for rendering on these media
+     * @param mediaTypes the media types
+     * @return the current element builder
+     */
+    LINK $media(EnumSet<Media> mediaTypes);
+
+    /**
+     * for rendering on these media.
+     * @param mediaTypes comma-separated list of media
+     * @return the current element builder
+     */
+    LINK $media(String mediaTypes);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface IMG extends Attrs, _Child {
+
+    /** URI of image to embed
+     * @param uri the URI
+     * @return the current element builder
+     */
+    IMG $src(String uri);
+
+    /** short description
+     * @param desc the description
+     * @return the current element builder
+     */
+    IMG $alt(String desc);
+
+    // $longdesc omitted. use <a...><img..></a> instead
+    // $name omitted. use id instead.
+
+    /** override height
+     * @param pixels the height
+     * @return the current element builder
+     */
+    IMG $height(int pixels);
+
+    /**
+     * override height
+     * @param cdata the height (can use %, * etc.)
+     * @return the current element builder
+     */
+    IMG $height(String cdata);
+
+    /** override width
+     * @param pixels the width
+     * @return the current element builder
+     */
+    IMG $width(int pixels);
+
+    /**
+     * override width
+     * @param cdata the width (can use %, * etc.)
+     * @return the current element builder
+     */
+    IMG $width(String cdata);
+
+    /** use client-side image map
+     * @param uri the URI
+     * @return the current element builder
+     */
+    IMG $usemap(String uri);
+
+    /** use server-side image map
+     * @return the current element builder
+     */
+    IMG $ismap();
+  }
+
+  /**
+   *
+   */
+  public interface _Param extends _Child {
+
+    /**
+     * Add a PARAM (parameter) element.
+     * @return a new PARAM element builder
+     */
+    PARAM param();
+
+    /**
+     * Add a PARAM element.
+     * Shortcut of <code>param().$name(name).$value(value).__();</code>
+     * @param name of the value
+     * @param value the value
+     * @return the current element builder
+     */
+    _Param param(String name, String value);
+  }
+
+  /**
+   *
+   */
+  public interface OBJECT extends Attrs, _Param, Flow, _Child {
+    // $declare omitted. repeat element completely
+
+    // $archive, classid, codebase, codetype omitted. use data and type
+
+    /** reference to object's data
+     * @param uri the URI
+     * @return the current element builder
+     */
+    OBJECT $data(String uri);
+
+    /** content type for data
+     * @param contentType the type
+     * @return the current element builder
+     */
+    OBJECT $type(String contentType);
+
+    // $standby omitted. fix the resource instead.
+
+    /** override height
+     * @param pixels the height
+     * @return the current element builder
+     */
+    OBJECT $height(int pixels);
+
+    /**
+     * override height
+     * @param length the height (can use %, *)
+     * @return the current element builder
+     */
+    OBJECT $height(String length);
+
+    /** override width
+     * @param pixels the width
+     * @return the current element builder
+     */
+    OBJECT $width(int pixels);
+
+    /**
+     * override width
+     * @param length the width (can use %, *)
+     * @return the current element builder
+     */
+    OBJECT $width(String length);
+
+    /** use client-side image map
+     * @param uri the URI/name of the map
+     * @return the current element builder
+     */
+    OBJECT $usemap(String uri);
+
+    /** submit as part of form
+     * @param cdata the name of the object
+     * @return the current element builder
+     */
+    OBJECT $name(String cdata);
+
+    /** position in tabbing order
+     * @param index of the order
+     * @return the current element builder
+     */
+    OBJECT $tabindex(int index);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface PARAM {
+
+    /** document-wide unique id
+     * @param cdata the id
+     * @return the current element builder
+     */
+    PARAM $id(String cdata);
+
+    /** property name. Required.
+     * @param cdata the name
+     * @return the current element builder
+     */
+    PARAM $name(String cdata);
+
+    /** property value
+     * @param cdata the value
+     * @return the current element builder
+     */
+    PARAM $value(String cdata);
+
+    // $type and valuetype omitted
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface HR extends Attrs, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface P extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface H1 extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface H2 extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface H3 extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface H4 extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface H5 extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface H6 extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface PRE extends Attrs, _Child, /* (%inline;)* -(%pre.exclusion) */
+                               PCData, _FontStyle, Phrase, _Anchor, _Special,
+                               FormCtrl {
+  }
+
+  /**
+   *
+   */
+  public interface Q extends Attrs, Inline, _Child {
+
+    /** URI for source document or msg
+     * @param uri the URI
+     * @return the current element builder
+     */
+    Q $cite(String uri);
+  }
+
+  /**
+   *
+   */
+  public interface BLOCKQUOTE extends Attrs, Block, _Script, _Child {
+
+    /** URI for source document or msg
+     * @param uri the URI
+     * @return the current element builder
+     */
+    BLOCKQUOTE $cite(String uri);
+  }
+
+  /**
+   * @see _InsDel INS/DEL quirks.
+   */
+  public interface INS extends Attrs, Flow, _Child {
+    /** info on reason for change
+     * @param uri the info URI
+     * @return the current element builder
+     */
+    INS $cite(String uri);
+
+    /** date and time of change
+     * @param datetime the time
+     * @return the current element builder
+     */
+    INS $datetime(String datetime);
+  }
+
+  /**
+   * @see _InsDel INS/DEL quirks.
+   */
+  public interface DEL extends Attrs, Flow, _Child {
+    /** info on reason for change
+     * @param uri the info URI
+     * @return the current element builder
+     */
+    DEL $cite(String uri);
+
+    /** date and time of change
+     * @param datetime the time
+     * @return the current element builder
+     */
+    DEL $datetime(String datetime);
+  }
+
+  /**
+   *
+   */
+  public interface _Dl extends _Child {
+
+    /**
+     * Add a DT (term of the item) element.
+     * @return a new DT element builder
+     */
+    DT dt();
+
+    /**
+     * Add a complete DT element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Dl dt(String cdata);
+
+    /**
+     * Add a DD (definition/description) element.
+     * @return a new DD element builder
+     */
+    DD dd();
+
+    /**
+     * Add a complete DD element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Dl dd(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface DL extends Attrs, _Dl, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface DT extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface DD extends Attrs, Flow, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface _Li extends _Child {
+
+    /**
+     * Add an LI (list item) element.
+     * @return a new LI element builder
+     */
+    LI li();
+
+    /**
+     * Add a complete LI element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Li li(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface OL extends Attrs, _Li, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface UL extends Attrs, _Li, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface LI extends Attrs, Flow, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface FORM extends Attrs, _Child, /* (%block;|SCRIPT)+ -(FORM) */
+                                _Script, _Block, _FieldSet {
+    /** server-side form handler
+     * @param uri
+     * @return the current element builder
+     */
+    FORM $action(String uri);
+
+    /** HTTP method used to submit the form
+     * @param method
+     * @return the current element builder
+     */
+    FORM $method(Method method);
+
+    /**
+     * content type for the "POST" method.
+     * The default is "application/x-www-form-urlencoded".
+     * Use "multipart/form-data" for input type=file.
+     * @param enctype
+     * @return the current element builder
+     */
+    FORM $enctype(String enctype);
+
+    /** list of MIME types for file upload
+     * @param cdata
+     * @return the current element builder
+     */
+    FORM $accept(String cdata);
+
+    /** name of form for scripting
+     * @param cdata
+     * @return the current element builder
+     */
+    FORM $name(String cdata);
+
+    /** the form was submitted
+     * @param script
+     * @return the current element builder
+     */
+    FORM $onsubmit(String script);
+
+    /** the form was reset
+     * @param script
+     * @return the current element builder
+     */
+    FORM $onreset(String script);
+
+    /** (space and/or comma separated) list of supported charsets
+     * @param cdata
+     * @return the current element builder
+     */
+    FORM $accept_charset(String cdata);
+  }
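+
+  // Non-normative sketch of a simple form; the action URI, the Method and
+  // InputType constants, and the field names are illustrative, "html" is
+  // an assumed generated builder:
+  //
+  //   html.form("#search").$action("/search").$method(Method.GET)
+  //         .p()
+  //           .label("q", "Query:")                         // LABEL shortcut
+  //           .input("#q").$type(InputType.text).$name("q").__()
+  //         .__()                                           // close the P
+  //       .__();                                            // close the FORM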
+
+  /**
+   *
+   */
+  public interface LABEL extends Attrs, _Child, /* (%inline;)* -(LABEL) */
+                                 PCData, FontStyle, Phrase, Special, _FormCtrl {
+    /** matches field ID value
+     * @param cdata
+     * @return the current element builder
+     */
+    LABEL $for(String cdata);
+
+    /** accessibility key character
+     * @param cdata
+     * @return the current element builder
+     */
+    LABEL $accesskey(String cdata);
+
+    /** the element got the focus
+     * @param script
+     * @return the current element builder
+     */
+    LABEL $onfocus(String script);
+
+    /** the element lost the focus
+     * @param script
+     * @return the current element builder
+     */
+    LABEL $onblur(String script);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface INPUT extends Attrs, _Child {
+    /** what kind of widget is needed. default is "text".
+     * @param inputType
+     * @return the current element builder
+     */
+    INPUT $type(InputType inputType);
+
+    /** submit as part of form
+     * @param cdata
+     * @return the current element builder
+     */
+    INPUT $name(String cdata);
+
+    /** Specify for radio buttons and checkboxes
+     * @param cdata
+     * @return the current element builder
+     */
+    INPUT $value(String cdata);
+
+    /** for radio buttons and check boxes
+     * @return the current element builder
+     */
+    INPUT $checked();
+
+    /** unavailable in this context
+     * @return the current element builder
+     */
+    INPUT $disabled();
+
+    /** for text and passwd
+     * @return the current element builder
+     */
+    INPUT $readonly();
+
+    /** specific to each type of field
+     * @param cdata
+     * @return the current element builder
+     */
+    INPUT $size(String cdata);
+
+    /** max chars for text fields
+     * @param length
+     * @return the current element builder
+     */
+    INPUT $maxlength(int length);
+
+    /** for fields with images
+     * @param uri
+     * @return the current element builder
+     */
+    INPUT $src(String uri);
+
+    /** short description
+     * @param cdata
+     * @return the current element builder
+     */
+    INPUT $alt(String cdata);
+
+    // $usemap omitted. use img instead of input for image maps.
+    /** use server-side image map
+     * @return the current element builder
+     */
+    INPUT $ismap();
+
+    /** position in tabbing order
+     * @param index
+     * @return the current element builder
+     */
+    INPUT $tabindex(int index);
+
+    /** accessibility key character
+     * @param cdata
+     * @return the current element builder
+     */
+    INPUT $accesskey(String cdata);
+
+    /** the element got the focus
+     * @param script
+     * @return the current element builder
+     */
+    INPUT $onfocus(String script);
+
+    /** the element lost the focus
+     * @param script
+     * @return the current element builder
+     */
+    INPUT $onblur(String script);
+
+    /** some text was selected
+     * @param script
+     * @return the current element builder
+     */
+    INPUT $onselect(String script);
+
+    /** the element value was changed
+     * @param script
+     * @return the current element builder
+     */
+    INPUT $onchange(String script);
+
+    /** list of MIME types for file upload (csv)
+     * @param contentTypes
+     * @return the current element builder
+     */
+    INPUT $accept(String contentTypes);
+  }
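+
+  // Non-normative sketch of a checkbox, assuming an open inline context
+  // "p" from a generated builder; the InputType constant and the
+  // name/value pair are illustrative:
+  //
+  //   p.input().$type(InputType.checkbox)
+  //        .$name("verbose").$value("true").$checked().__();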
+
+  /**
+   *
+   */
+  public interface _Option extends _Child {
+    /**
+     * Add an OPTION element.
+     * @return a new OPTION element builder
+     */
+    OPTION option();
+
+    /**
+     * Add a complete OPTION element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Option option(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface SELECT extends Attrs, _Option, _Child {
+    /**
+     * Add an OPTGROUP element.
+     * @return a new OPTGROUP element builder
+     */
+    OPTGROUP optgroup();
+
+    /** field name
+     * @param cdata
+     * @return the current element builder
+     */
+    SELECT $name(String cdata);
+
+    /** rows visible
+     * @param rows
+     * @return the current element builder
+     */
+    SELECT $size(int rows);
+
+    /** default is single selection
+     * @return the current element builder
+     */
+    SELECT $multiple();
+
+    /** unavailable in this context
+     * @return the current element builder
+     */
+    SELECT $disabled();
+
+    /** position in tabbing order
+     * @param index
+     * @return the current element builder
+     */
+    SELECT $tabindex(int index);
+
+    /** the element got the focus
+     * @param script
+     * @return the current element builder
+     */
+    SELECT $onfocus(String script);
+
+    /** the element lost the focus
+     * @param script
+     * @return the current element builder
+     */
+    SELECT $onblur(String script);
+
+    /** the element value was changed
+     * @param script
+     * @return the current element builder
+     */
+    SELECT $onchange(String script);
+  }
+
+  /**
+   *
+   */
+  public interface OPTGROUP extends Attrs, _Option, _Child {
+    /** unavailable in this context
+     * @return the current element builder
+     */
+    OPTGROUP $disabled();
+
+    /** for use in hierarchical menus
+     * @param cdata
+     * @return the current element builder
+     */
+    OPTGROUP $label(String cdata);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface OPTION extends Attrs, PCData, _Child {
+    /** currently selected option
+     * @return the current element builder
+     */
+    OPTION $selected();
+
+    /** unavailable in this context
+     * @return the current element builder
+     */
+    OPTION $disabled();
+
+    /** for use in hierarchical menus
+     * @param cdata
+     * @return the current element builder
+     */
+    OPTION $label(String cdata);
+
+    /** defaults to element content
+     * @param cdata
+     * @return the current element builder
+     */
+    OPTION $value(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface TEXTAREA extends Attrs, PCData, _Child {
+    /** variable name for the text
+     * @param cdata
+     * @return the current element builder
+     */
+    TEXTAREA $name(String cdata);
+
+    /** visible rows
+     * @param rows
+     * @return the current element builder
+     */
+    TEXTAREA $rows(int rows);
+
+    /** visible columns
+     * @param cols
+     * @return the current element builder
+     */
+    TEXTAREA $cols(int cols);
+
+    /** unavailable in this context
+     * @return the current element builder
+     */
+    TEXTAREA $disabled();
+
+    /** text is readonly
+     * @return the current element builder
+     */
+    TEXTAREA $readonly();
+
+    /** position in tabbing order
+     * @param index
+     * @return the current element builder
+     */
+    TEXTAREA $tabindex(int index);
+
+    /** accessibility key character
+     * @param cdata
+     * @return the current element builder
+     */
+    TEXTAREA $accesskey(String cdata);
+
+    /** the element got the focus
+     * @param script
+     * @return the current element builder
+     */
+    TEXTAREA $onfocus(String script);
+
+    /** the element lost the focus
+     * @param script
+     * @return the current element builder
+     */
+    TEXTAREA $onblur(String script);
+
+    /** some text was selected
+     * @param script
+     * @return the current element builder
+     */
+    TEXTAREA $onselect(String script);
+
+    /** the element value was changed
+     * @param script
+     * @return the current element builder
+     */
+    TEXTAREA $onchange(String script);
+  }
+
+  /**
+   *
+   */
+  public interface _Legend extends _Child {
+    /**
+     * Add a LEGEND element.
+     * @return a new LEGEND element builder
+     */
+    LEGEND legend();
+
+    /**
+     * Add a LEGEND element.
+     * @param cdata
+     * @return the current element builder
+     */
+    _Legend legend(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface FIELDSET extends Attrs, _Legend, PCData, Flow, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface LEGEND extends Attrs, Inline, _Child {
+    /** accessibility key character
+     * @param cdata
+     * @return the current element builder
+     */
+    LEGEND $accesskey(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface BUTTON extends /* (%flow;)* -(A|%formctrl|FORM|FIELDSET) */
+      _Block, PCData, FontStyle, Phrase, _Special, _ImgObject, _SubSup, Attrs {
+    /** name of the value
+     * @param cdata
+     * @return the current element builder
+     */
+    BUTTON $name(String cdata);
+
+    /** sent to server when submitted
+     * @param cdata
+     * @return the current element builder
+     */
+    BUTTON $value(String cdata);
+
+    /** for use as form button
+     * @param type
+     * @return the current element builder
+     */
+    BUTTON $type(ButtonType type);
+
+    /** unavailable in this context
+     * @return the current element builder
+     */
+    BUTTON $disabled();
+
+    /** position in tabbing order
+     * @param index
+     * @return the current element builder
+     */
+    BUTTON $tabindex(int index);
+
+    /** accessibility key character
+     * @param cdata
+     * @return the current element builder
+     */
+    BUTTON $accesskey(String cdata);
+
+    /** the element got the focus
+     * @param script
+     * @return the current element builder
+     */
+    BUTTON $onfocus(String script);
+
+    /** the element lost the focus
+     * @param script
+     * @return the current element builder
+     */
+    BUTTON $onblur(String script);
+  }
+
+  /**
+   *
+   */
+  public interface _TableRow {
+    /**
+     * Add a TR (table row) element.
+     * @return a new TR element builder
+     */
+    TR tr();
+
+    /**
+     * Add a TR element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new TR element builder
+     */
+    TR tr(String selector);
+  }
+
+  /**
+   *
+   */
+  public interface _TableCol extends _Child {
+    /**
+     * Add a COL element.
+     * @return a new COL element builder
+     */
+    COL col();
+
+    /**
+     * Add a COL element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return the current element builder
+     */
+    _TableCol col(String selector);
+  }
+
+  /**
+   *
+   */
+  public interface _Table extends _TableRow, _TableCol {
+    /**
+     * Add a CAPTION element.
+     * @return a new CAPTION element builder
+     */
+    CAPTION caption();
+
+    /**
+     * Add a CAPTION element.
+     * @param cdata
+     * @return the current element builder
+     */
+    _Table caption(String cdata);
+
+    /**
+     * Add a COLGROUP element.
+     * @return a new COLGROUP element builder
+     */
+    COLGROUP colgroup();
+
+    /**
+     * Add a THEAD element.
+     * @return a new THEAD element builder
+     */
+    THEAD thead();
+
+    /**
+     * Add a THEAD element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new THEAD element builder
+     */
+    THEAD thead(String selector);
+
+    /**
+     * Add a TFOOT element.
+     * @return a new TFOOT element builder
+     */
+    TFOOT tfoot();
+
+    /**
+     * Add a TFOOT element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new TFOOT element builder
+     */
+    TFOOT tfoot(String selector);
+
+    /**
+     * Add a TBODY (table body) element.
+     * Must come after THEAD/TFOOT; do not mix with TR elements at the same level.
+     * @return a new tbody element builder
+     */
+    TBODY tbody();
+
+    /**
+     * Add a TBODY element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new TBODY element builder
+     */
+    TBODY tbody(String selector);
+
+    // $summary, width, border, frame, rules, cellpadding, cellspacing omitted
+    // use css instead
+  }
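+
+  // Non-normative sketch of the table builders, assuming a generated
+  // builder "html"; headers and cell text are illustrative:
+  //
+  //   html.table("#apps")
+  //         .thead()
+  //           .tr().th("ID").th("State").__()    // header row
+  //         .__()
+  //         .tbody()
+  //           .tr().td("app_0001").td("RUNNING").__()
+  //         .__()
+  //       .__();
+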
+  /**
+   * TBODY should be used after THEAD/TFOOT, iff there are no TABLE.TR elements.
+   */
+  public interface TABLE extends Attrs, _Table, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface CAPTION extends Attrs, Inline, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface THEAD extends Attrs, _TableRow, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface TFOOT extends Attrs, _TableRow, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface TBODY extends Attrs, _TableRow, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface COLGROUP extends Attrs, _TableCol, _Child {
+    /** default number of columns in group. default: 1
+     * @param cols
+     * @return the current element builder
+     */
+    COLGROUP $span(int cols);
+
+    // $width omitted. use css instead.
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface COL extends Attrs, _Child {
+    /** COL attributes affect N columns. default: 1
+     * @param cols
+     * @return the current element builder
+     */
+    COL $span(int cols);
+    // $width omitted. use css instead.
+  }
+
+  /**
+   *
+   */
+  public interface _Tr extends _Child {
+    /**
+     * Add a TH element.
+     * @return a new TH element builder
+     */
+    TH th();
+
+    /**
+     * Add a complete TH element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Tr th(String cdata);
+
+    /**
+     * Add a TH element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Tr th(String selector, String cdata);
+
+    /**
+     * Add a TD element.
+     * @return a new TD element builder
+     */
+    TD td();
+
+    /**
+     * Add a TD element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Tr td(String cdata);
+
+    /**
+     * Add a TD element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Tr td(String selector, String cdata);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface TR extends Attrs, _Tr, _Child {
+  }
+
+  /**
+   *
+   */
+  public interface _Cell extends Attrs, Flow, _Child {
+    // $abbr omitted. begin cell text with terse text instead.
+    // use $title for elaboration, when appropriate.
+    // $axis omitted. use scope.
+    /** space-separated list of id's for header cells
+     * @param cdata
+     * @return the current element builder
+     */
+    _Cell $headers(String cdata);
+
+    /** scope covered by header cells
+     * @param scope
+     * @return the current element builder
+     */
+    _Cell $scope(Scope scope);
+
+    /** number of rows spanned by cell. default: 1
+     * @param rows
+     * @return the current element builder
+     */
+    _Cell $rowspan(int rows);
+
+    /** number of cols spanned by cell. default: 1
+     * @param cols
+     * @return the current element builder
+     */
+    _Cell $colspan(int cols);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface TH extends _Cell {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface TD extends _Cell {
+  }
+
+  /**
+   *
+   */
+  public interface _Head extends HeadMisc {
+    /**
+     * Add a TITLE element.
+     * @return a new TITLE element builder
+     */
+    TITLE title();
+
+    /**
+     * Add a TITLE element.
+     * @param cdata the content
+     * @return the current element builder
+     */
+    _Head title(String cdata);
+
+    /**
+     * Add a BASE element.
+     * @return a new BASE element builder
+     */
+    BASE base();
+
+    /**
+     * Add a complete BASE element.
+     * @param uri
+     * @return the current element builder
+     */
+    _Head base(String uri);
+  }
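+
+  // Non-normative sketch of the head shortcuts, assuming a generated
+  // builder "html"; the title and base URI are illustrative:
+  //
+  //   html.head()
+  //         .title("Cluster overview")    // complete TITLE shortcut
+  //         .base("http://example.com/")  // complete BASE shortcut
+  //       .__();                          // close the HEAD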
+
+  /**
+   *
+   */
+  public interface HEAD extends I18nAttrs, _Head, _Child {
+    // $profile omitted
+  }
+
+  /**
+   *
+   */
+  public interface TITLE extends I18nAttrs, PCData, _Child {
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface BASE extends _Child {
+    /** URI that acts as base URI
+     * @param uri
+     * @return the current element builder
+     */
+    BASE $href(String uri);
+  }
+
+  /**
+   *
+   */
+  @Element(endTag=false)
+  public interface META extends I18nAttrs, _Child {
+    /** HTTP response header name
+     * @param header
+     * @return the current element builder
+     */
+    META $http_equiv(String header);
+
+    /** metainformation name
+     * @param name
+     * @return the current element builder
+     */
+    META $name(String name);
+
+    /** associated information
+     * @param cdata
+     * @return the current element builder
+     */
+    META $content(String cdata);
+
+    // $scheme omitted
+  }
+
+  /**
+   *
+   */
+  public interface STYLE extends I18nAttrs, _Content, _Child {
+    /** content type of style language
+     * @param cdata
+     * @return the current element builder
+     */
+    STYLE $type(String cdata);
+
+    /** designed for use with these media
+     * @param media
+     * @return the current element builder
+     */
+    STYLE $media(EnumSet<Media> media);
+
+    /** advisory title
+     * @param cdata
+     * @return the current element builder
+     */
+    STYLE $title(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface SCRIPT extends _Content, _Child {
+    /** char encoding of linked resource
+     * @param cdata
+     * @return the current element builder
+     */
+    SCRIPT $charset(String cdata);
+
+    /** content type of script language
+     * @param cdata
+     * @return the current element builder
+     */
+    SCRIPT $type(String cdata);
+
+    /** URI for an external script
+     * @param cdata
+     * @return the current element builder
+     */
+    SCRIPT $src(String cdata);
+
+    /** UA may defer execution of script
+     * @param cdata
+     * @return the current element builder
+     */
+    SCRIPT $defer(String cdata);
+  }
+
+  /**
+   *
+   */
+  public interface _Html extends _Head, _Body, __ {
+    /**
+     * Add a HEAD element.
+     * @return a new HEAD element builder
+     */
+    HEAD head();
+
+    /**
+     * Add a BODY element.
+     * @return a new BODY element builder
+     */
+    BODY body();
+
+    /**
+     * Add a BODY element.
+     * @param selector the css selector in the form of (#id)?(.class)*
+     * @return a new BODY element builder
+     */
+    BODY body(String selector);
+  }
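+
+  // Non-normative sketch of a complete page, assuming a generated Hamlet
+  // builder "html"; all content is illustrative:
+  //
+  //   html.title("Example")
+  //       .body()
+  //         .h1("Hello")                             // complete heading
+  //         .p().__("Generated with hamlet2.").__()
+  //       .__().__();                                // close BODY, HTML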
+
+  // There is only one HEAD and BODY, in that order.
+  /**
+   * The root element
+   */
+  public interface HTML extends I18nAttrs, _Html {
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/package-info.java
new file mode 100644
index 0000000..64a8447
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package is the replacement for org.apache.hadoop.yarn.webapp.hamlet.
+ * The old package used _ as a one-character identifier,
+ * which is banned as of JDK 9.
+ */
+@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
+package org.apache.hadoop.yarn.webapp.hamlet2;
+import org.apache.hadoop.classification.InterfaceAudience;
+
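
For context on the mechanics of the migration this package-info describes:
under JDK 9 a bare _ is a reserved keyword, so the old _() element terminator
and the HTML<_> type parameter become __() and HTML<__> in hamlet2. A minimal
sketch of the rename, modeled on the AggregatedLogsNavBlock change later in
this patch (the class name here is illustrative):

    import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

    public class DemoNavBlock extends HtmlBlock {  // illustrative name
      @Override
      protected void render(Block html) {
        // hamlet (pre-JDK9):  html.div("#nav").h3()._("Logs")._()._();
        html
          .div("#nav")
            .h3().__("Logs").__()  // __() replaces _() as the terminator
          .__();
      }
    }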

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
index 1da6e23..0c7e09e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
@@ -44,10 +44,9 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.PRE;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.PRE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -84,12 +83,12 @@ public class AggregatedLogsBlock extends HtmlBlock {
     if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
         YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
       html.h1()
-          ._("Aggregation is not enabled. Try the nodemanager at " + nodeId)
-          ._();
+          .__("Aggregation is not enabled. Try the nodemanager at " + nodeId)
+          .__();
       if(nmApplicationLogUrl != null) {
         html.h1()
-            ._("Or see application log at " + nmApplicationLogUrl)
-            ._();
+            .__("Or see application log at " + nmApplicationLogUrl)
+            .__();
       }
       return;
     }
@@ -110,18 +109,18 @@ public class AggregatedLogsBlock extends HtmlBlock {
             .listStatus(remoteAppDir);
     } catch (FileNotFoundException fnf) {
       html.h1()
-          ._("Logs not available for " + logEntity
+          .__("Logs not available for " + logEntity
               + ". Aggregation may not be complete, "
-              + "Check back later or try the nodemanager at " + nodeId)._();
+              + "Check back later or try the nodemanager at " + nodeId).__();
       if(nmApplicationLogUrl != null)  {
         html.h1()
-            ._("Or see application log at " + nmApplicationLogUrl)
-            ._();
+            .__("Or see application log at " + nmApplicationLogUrl)
+            .__();
       }
       return;
     } catch (Exception ex) {
       html.h1()
-          ._("Error getting logs at " + nodeId)._();
+          .__("Error getting logs at " + nodeId).__();
       return;
     }
 
@@ -168,9 +167,9 @@ public class AggregatedLogsBlock extends HtmlBlock {
           if (callerUGI != null && !aclsManager.checkAccess(callerUGI,
               ApplicationAccessType.VIEW_APP, owner, applicationId)) {
             html.h1()
-                ._("User [" + remoteUser
+                .__("User [" + remoteUser
                     + "] is not authorized to view the logs for " + logEntity
-                    + " in log file [" + thisNodeFile.getPath().getName() + "]")._();
+                    + " in log file [" + thisNodeFile.getPath().getName() + "]").__();
             LOG.error("User [" + remoteUser
               + "] is not authorized to view the logs for " + logEntity);
             continue;
@@ -188,8 +187,9 @@ public class AggregatedLogsBlock extends HtmlBlock {
           LOG.error("Error getting logs for " + logEntity, ex);
           continue;
         } finally {
-          if (reader != null)
+          if (reader != null) {
             reader.close();
+          }
         }
       }
       if (!foundLog) {
@@ -201,7 +201,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
         }
       }
     } catch (IOException e) {
-      html.h1()._("Error getting logs for " + logEntity)._();
+      html.h1().__("Error getting logs for " + logEntity).__();
       LOG.error("Error getting logs for " + logEntity, e);
     }
   }
@@ -219,12 +219,12 @@ public class AggregatedLogsBlock extends HtmlBlock {
           || desiredLogType.equals(logType)) {
         long logLength = logReader.getCurrentLogLength();
         if (foundLog) {
-          html.pre()._("\n\n")._();
+          html.pre().__("\n\n").__();
         }
 
-        html.p()._("Log Type: " + logType)._();
-        html.p()._("Log Upload Time: " + Times.format(logUpLoadTime))._();
-        html.p()._("Log Length: " + Long.toString(logLength))._();
+        html.p().__("Log Type: " + logType).__();
+        html.p().__("Log Upload Time: " + Times.format(logUpLoadTime)).__();
+        html.p().__("Log Length: " + Long.toString(logLength)).__();
 
         long start = logLimits.start < 0
             ? logLength + logLimits.start : logLimits.start;
@@ -238,12 +238,12 @@ public class AggregatedLogsBlock extends HtmlBlock {
 
         long toRead = end - start;
         if (toRead < logLength) {
-            html.p()._("Showing " + toRead + " bytes of " + logLength
+            html.p().__("Showing " + toRead + " bytes of " + logLength
                 + " total. Click ")
                 .a(url("logs", $(NM_NODENAME), $(CONTAINER_ID),
                     $(ENTITY_STRING), $(APP_OWNER),
                     logType, "?start=0"), "here").
-                    _(" for the full log.")._();
+                __(" for the full log.").__();
         }
 
         long totalSkipped = 0;
@@ -267,12 +267,12 @@ public class AggregatedLogsBlock extends HtmlBlock {
 
         while (toRead > 0
             && (len = logReader.read(cbuf, 0, currentToRead)) > 0) {
-          pre._(new String(cbuf, 0, len));
+          pre.__(new String(cbuf, 0, len));
           toRead = toRead - len;
           currentToRead = toRead > bufferSize ? bufferSize : (int) toRead;
         }
 
-        pre._();
+        pre.__();
         foundLog = true;
       }
 
@@ -285,7 +285,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
   private ContainerId verifyAndGetContainerId(Block html) {
     String containerIdStr = $(CONTAINER_ID);
     if (containerIdStr == null || containerIdStr.isEmpty()) {
-      html.h1()._("Cannot get container logs without a ContainerId")._();
+      html.h1().__("Cannot get container logs without a ContainerId").__();
       return null;
     }
     ContainerId containerId = null;
@@ -293,8 +293,8 @@ public class AggregatedLogsBlock extends HtmlBlock {
       containerId = ContainerId.fromString(containerIdStr);
     } catch (IllegalArgumentException e) {
       html.h1()
-          ._("Cannot get container logs for invalid containerId: "
-              + containerIdStr)._();
+          .__("Cannot get container logs for invalid containerId: "
+              + containerIdStr).__();
       return null;
     }
     return containerId;
@@ -303,15 +303,15 @@ public class AggregatedLogsBlock extends HtmlBlock {
   private NodeId verifyAndGetNodeId(Block html) {
     String nodeIdStr = $(NM_NODENAME);
     if (nodeIdStr == null || nodeIdStr.isEmpty()) {
-      html.h1()._("Cannot get container logs without a NodeId")._();
+      html.h1().__("Cannot get container logs without a NodeId").__();
       return null;
     }
     NodeId nodeId = null;
     try {
       nodeId = NodeId.fromString(nodeIdStr);
     } catch (IllegalArgumentException e) {
-      html.h1()._("Cannot get container logs. Invalid nodeId: " + nodeIdStr)
-          ._();
+      html.h1().__("Cannot get container logs. Invalid nodeId: " + nodeIdStr)
+          .__();
       return null;
     }
     return nodeId;
@@ -320,7 +320,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
   private String verifyAndGetAppOwner(Block html) {
     String appOwner = $(APP_OWNER);
     if (appOwner == null || appOwner.isEmpty()) {
-      html.h1()._("Cannot get container logs without an app owner")._();
+      html.h1().__("Cannot get container logs without an app owner").__();
     }
     return appOwner;
   }
@@ -341,7 +341,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
         start = Long.parseLong(startStr);
       } catch (NumberFormatException e) {
         isValid = false;
-        html.h1()._("Invalid log start value: " + startStr)._();
+        html.h1().__("Invalid log start value: " + startStr).__();
       }
     }
 
@@ -351,7 +351,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
         end = Long.parseLong(endStr);
       } catch (NumberFormatException e) {
         isValid = false;
-        html.h1()._("Invalid log end value: " + endStr)._();
+        html.h1().__("Invalid log end value: " + endStr).__();
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java
index fe83eaa..a6e3a05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java
@@ -28,7 +28,7 @@ public class AggregatedLogsNavBlock extends HtmlBlock {
   protected void render(Block html) {
     html
       .div("#nav")
-        .h3()._("Logs")._() // 
-      ._();
+        .h3().__("Logs").__()
+      .__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java
index 773738f..f097b0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java
@@ -37,7 +37,7 @@ public class AggregatedLogsPage extends TwoColumnLayout {
    * @see org.apache.hadoop.yarn.server.nodemanager.webapp.NMView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
    */
   @Override
-  protected void preHead(Page.HTML<_> html) {
+  protected void preHead(Page.HTML<__> html) {
     String logEntity = $(ENTITY_STRING);
     if (logEntity == null || logEntity.isEmpty()) {
       logEntity = $(CONTAINER_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java
index 68e09ad..fabb5c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java
@@ -30,24 +30,24 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class ErrorPage extends HtmlPage {
 
   @Override
-  protected void render(Page.HTML<_> html) {
+  protected void render(Page.HTML<__> html) {
     set(JQueryUI.ACCORDION_ID, "msg");
     String title = "Sorry, got error "+ status();
     html.
       title(title).
-      link(root_url("static","yarn.css")).
-      _(JQueryUI.class). // an embedded sub-view
+      link(root_url("static", "yarn.css")).
+        __(JQueryUI.class). // an embedded sub-view
       style("#msg { margin: 1em auto; width: 88%; }",
             "#msg h1 { padding: 0.2em 1.5em; font: bold 1.3em serif; }").
       div("#msg").
         h1(title).
         div().
-          _("Please consult").
+        __("Please consult").
           a("http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html",
-            "RFC 2616")._(" for meanings of the error code.")._().
+            "RFC 2616").__(" for meanings of the error code.").__().
         h1("Error Details").
         pre().
-          _(errorDetails())._()._()._();
+        __(errorDetails()).__().__().__();
   }
 
   protected String errorDetails() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java
index ba85ac6..e4d1f2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java
@@ -25,6 +25,6 @@ public class FooterBlock extends HtmlBlock {
 
   @Override protected void render(Block html) {
     html.
-      div("#footer.ui-widget")._();
+      div("#footer.ui-widget").__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java
index 03f0fb1..3a0f35a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java
@@ -31,9 +31,9 @@ public class HeaderBlock extends HtmlBlock {
     html.
       div("#header.ui-widget").
         div("#user").
-          _(loggedIn)._().
+        __(loggedIn).__().
         div("#logo").
-          img("/static/hadoop-st.png")._().
-        h1($(TITLE))._();
+          img("/static/hadoop-st.png").__().
+        h1($(TITLE)).__();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c6fa5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
index a785c0c..acf040e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.webapp.MimeType;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.WebAppException;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public abstract class HtmlBlock extends TextView implements SubView {



[37/50] [abbrv] hadoop git commit: Revert "MAPREDUCE-6199. AbstractCounters are not reset completely on deserialization (adhoot via rkanter)"

Posted by xg...@apache.org.
Revert "MAPREDUCE-6199. AbstractCounters are not reset completely on deserialization (adhoot via rkanter)"

This reverts commit 390a7c12f543b2c94a74f08d6d2a28410472043a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/713349a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/713349a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/713349a9

Branch: refs/heads/YARN-5734
Commit: 713349a9af4654dad4d4c372454552bfd063ca5f
Parents: e15f928
Author: Junping Du <ju...@apache.org>
Authored: Fri Jul 28 14:21:04 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Fri Jul 28 14:21:04 2017 -0700

----------------------------------------------------------------------
 .../mapreduce/counters/AbstractCounters.java    |  4 --
 .../hadoop/mapreduce/counters/Limits.java       |  7 ----
 .../apache/hadoop/mapreduce/TestCounters.java   | 39 +-------------------
 3 files changed, 1 insertion(+), 49 deletions(-)
----------------------------------------------------------------------
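
What the revert undoes, in concrete terms: MAPREDUCE-6199 made
Counters.readFields() clear any pre-existing groups and reset the
per-instance limits before deserializing, so a reused Counters object did not
count stale groups against the group limit. A sketch of that round-trip,
adapted from the test being removed below (imports shown for clarity; this is
a fragment, not a compilable unit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 1); // allow one group
    Limits.init(conf);

    Counters counters = new Counters();
    counters.findCounter("firstGroup", "c");   // consumes the only group slot

    DataOutputBuffer out = new DataOutputBuffer();
    new Counters().write(out);                 // serialize an empty Counters

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    counters.readFields(in);                   // deserialize into reused object

    // With MAPREDUCE-6199 in place, the old group and limit state were
    // cleared here, so adding a new group succeeded. After this revert, the
    // stale group still counts and the next call throws
    // LimitExceededException.
    counters.findCounter("secondGroup", "c");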


http://git-wip-us.apache.org/repos/asf/hadoop/blob/713349a9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
index e6e74da..4ab7e89 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
@@ -307,10 +307,6 @@ public abstract class AbstractCounters<C extends Counter,
       fgroups.put(group.getName(), group);
     }
     int numGroups = WritableUtils.readVInt(in);
-    if (!groups.isEmpty()) {
-      groups.clear();
-      limits.reset();
-    }
     while (numGroups-- > 0) {
       limits.checkGroups(groups.size() + 1);
       G group = groupFactory.newGenericGroup(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/713349a9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
index 9546c8d..3821694 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
@@ -124,15 +124,8 @@ public class Limits {
     return firstViolation;
   }
 
-  // This allows initialization of global settings and not for an instance
   public static synchronized void reset(Configuration conf) {
     isInited = false;
     init(conf);
   }
-
-  // This allows resetting of an instance to allow reuse
-  public synchronized void reset() {
-    totalCounters = 0;
-    firstViolation = null;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/713349a9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
index 0215568..83d689c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
@@ -17,12 +17,8 @@
  */
 package org.apache.hadoop.mapreduce;
 
-import java.io.IOException;
 import java.util.Random;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
@@ -74,40 +70,7 @@ public class TestCounters {
       testMaxGroups(new Counters());
     }
   }
-
-  @Test public void testResetOnDeserialize() throws IOException {
-    // Allow only one counterGroup
-    Configuration conf = new Configuration();
-    conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 1);
-    Limits.init(conf);
-
-    Counters countersWithOneGroup = new Counters();
-    countersWithOneGroup.findCounter("firstOf1Allowed", "First group");
-    boolean caughtExpectedException = false;
-    try {
-      countersWithOneGroup.findCounter("secondIsTooMany", "Second group");
-    }
-    catch (LimitExceededException _) {
-      caughtExpectedException = true;
-    }
-
-    assertTrue("Did not throw expected exception",
-        caughtExpectedException);
-
-    Counters countersWithZeroGroups = new Counters();
-    DataOutputBuffer out = new DataOutputBuffer();
-    countersWithZeroGroups.write(out);
-
-    DataInputBuffer in = new DataInputBuffer();
-    in.reset(out.getData(), out.getLength());
-
-    countersWithOneGroup.readFields(in);
-
-    // After reset one should be able to add a group
-    countersWithOneGroup.findCounter("firstGroupAfterReset", "After reset " +
-        "limit should be set back to zero");
-  }
-
+  
   @Test
   public void testCountersIncrement() {
     Counters fCounters = new Counters();

