Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2018/05/22 20:14:29 UTC

[01/50] [abbrv] hadoop git commit: MAPREDUCE-7094. LocalDistributedCacheManager leaves classloaders open, which leaks FDs. Contributed by Adam Szita.

Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 2d00a0c71 -> 60821fb20


MAPREDUCE-7094. LocalDistributedCacheManager leaves classloaders open, which leaks FDs. Contributed by Adam Szita.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2cdffb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2cdffb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2cdffb9

Branch: refs/heads/HDDS-48
Commit: a2cdffb95acbcb3625ee72ebc8aeb8bf17fa4bc7
Parents: cc3600a
Author: Miklos Szegedi <sz...@apache.org>
Authored: Thu May 17 10:13:43 2018 -0700
Committer: Miklos Szegedi <sz...@apache.org>
Committed: Thu May 17 11:16:04 2018 -0700

----------------------------------------------------------------------
 .../mapred/LocalDistributedCacheManager.java    | 31 ++++++++++++++++----
 .../apache/hadoop/mapred/LocalJobRunner.java    | 14 ++++++---
 2 files changed, 36 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2cdffb9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
index bcf73d1..1565e2e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -73,6 +73,7 @@ class LocalDistributedCacheManager {
   private List<String> localClasspaths = new ArrayList<String>();
   
   private List<File> symlinksCreated = new ArrayList<File>();
+  private URLClassLoader classLoaderCreated = null;
   
   private boolean setupCalled = false;
   
@@ -82,7 +83,7 @@ class LocalDistributedCacheManager {
    * @param conf
    * @throws IOException
    */
-  public void setup(JobConf conf, JobID jobId) throws IOException {
+  public synchronized void setup(JobConf conf, JobID jobId) throws IOException {
     File workDir = new File(System.getProperty("user.dir"));
     
     // Generate YARN local resources objects corresponding to the distributed
@@ -212,7 +213,7 @@ class LocalDistributedCacheManager {
    * Should be called after setup().
    * 
    */
-  public boolean hasLocalClasspaths() {
+  public synchronized boolean hasLocalClasspaths() {
     if (!setupCalled) {
       throw new IllegalStateException(
           "hasLocalClasspaths() should be called after setup()");
@@ -224,8 +225,11 @@ class LocalDistributedCacheManager {
    * Creates a class loader that includes the designated
    * files and archives.
    */
-  public ClassLoader makeClassLoader(final ClassLoader parent)
+  public synchronized ClassLoader makeClassLoader(final ClassLoader parent)
       throws MalformedURLException {
+    if (classLoaderCreated != null) {
+      throw new IllegalStateException("A classloader was already created");
+    }
     final URL[] urls = new URL[localClasspaths.size()];
     for (int i = 0; i < localClasspaths.size(); ++i) {
       urls[i] = new File(localClasspaths.get(i)).toURI().toURL();
@@ -234,12 +238,29 @@ class LocalDistributedCacheManager {
     return AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {
       @Override
       public ClassLoader run() {
-        return new URLClassLoader(urls, parent);
+        classLoaderCreated = new URLClassLoader(urls, parent);
+        return classLoaderCreated;
       }
     });
   }
 
-  public void close() throws IOException {
+  public synchronized void close() throws IOException {
+    if(classLoaderCreated != null) {
+      AccessController.doPrivileged(new PrivilegedAction<Void>() {
+        @Override
+        public Void run() {
+          try {
+            classLoaderCreated.close();
+            classLoaderCreated = null;
+          } catch (IOException e) {
+            LOG.warn("Failed to close classloader created " +
+                "by LocalDistributedCacheManager");
+          }
+          return null;
+        }
+      });
+    }
+
     for (File symlink : symlinksCreated) {
       if (!symlink.delete()) {
         LOG.warn("Failed to delete symlink created by the local job runner: " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2cdffb9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
index 2ab4e76..0f1d759 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
@@ -593,10 +593,16 @@ public class LocalJobRunner implements ClientProtocol {
 
       } finally {
         try {
-          fs.delete(systemJobFile.getParent(), true);  // delete submit dir
-          localFs.delete(localJobFile, true);              // delete local copy
-          // Cleanup distributed cache
-          localDistributedCacheManager.close();
+          try {
+            // Cleanup distributed cache
+            localDistributedCacheManager.close();
+          } finally {
+            try {
+              fs.delete(systemJobFile.getParent(), true); // delete submit dir
+            } finally {
+              localFs.delete(localJobFile, true);         // delete local copy
+            }
+          }
         } catch (IOException e) {
           LOG.warn("Error cleaning up "+id+": "+e);
         }
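
The nested try/finally in the LocalJobRunner change guarantees that each cleanup step is attempted even when an earlier one throws, with only the last failure propagating to the outer catch. A minimal runnable sketch of the pattern (hypothetical step method):

    import java.io.IOException;

    public class CleanupChain {
      static void step(String name) throws IOException {
        System.out.println("cleaning up: " + name);
      }

      public static void main(String[] args) {
        try {
          try {
            step("distributed cache");   // step 1
          } finally {
            try {
              step("submit dir");        // runs even if step 1 threw
            } finally {
              step("local job file");    // runs even if step 2 threw
            }
          }
        } catch (IOException e) {
          System.err.println("Error cleaning up: " + e);
        }
      }
    }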




[04/50] [abbrv] hadoop git commit: YARN-8293. Removed "User Name for service" for deploying secure YARN service. Contributed by Sunil G

Posted by ar...@apache.org.
YARN-8293.  Removed "User Name for service" for deploying secure YARN service.
            Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7802af6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7802af6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7802af6e

Branch: refs/heads/HDDS-48
Commit: 7802af6e9a85f033e9515cc7b23c125a0e06c325
Parents: 26f1e22
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 17 17:06:58 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 17 17:06:58 2018 -0400

----------------------------------------------------------------------
 .../main/webapp/app/adapters/yarn-servicedef.js |  4 ++-
 .../webapp/app/components/deploy-service.js     |  5 +++
 .../src/main/webapp/app/initializers/loader.js  | 32 ++++++++++++++++++++
 .../src/main/webapp/app/services/hosts.js       |  5 +++
 .../app/templates/components/deploy-service.hbs | 18 ++++++-----
 .../src/main/webapp/config/default-config.js    |  3 +-
 6 files changed, 57 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7802af6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
index 03685fb..9000d74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
@@ -26,7 +26,9 @@ export default RESTAbstractAdapter.extend({
 
   deployService(request, user) {
     var url = this.buildURL();
-    url += "/?user.name=" + user;
+    if(user) {
+      url += "/?user.name=" + user;
+    }
     return this.ajax(url, "POST", {data: request});
   },
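
With simple (non-Kerberos) authentication, Hadoop web endpoints identify the caller via the user.name query parameter, so the adapter now appends it only when a user is actually supplied. A small Java sketch of the same conditional URL construction (hypothetical helper; the endpoint path is assumed for illustration):

    public class ServiceUrl {
      // Append ?user.name=<user> only when a user is supplied.
      static String deployUrl(String base, String user) {
        return (user == null || user.isEmpty())
            ? base
            : base + "/?user.name=" + user;
      }

      public static void main(String[] args) {
        System.out.println(deployUrl("http://rm:8088/app/v1/services", "alice"));
        System.out.println(deployUrl("http://rm:8088/app/v1/services", null));
      }
    }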
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7802af6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
index 36895d7..18e4d36 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
@@ -28,6 +28,7 @@ export default Ember.Component.extend({
   serviceResp: null,
   isLoading: false,
   userName: '',
+  hosts: Ember.inject.service('hosts'),
 
   actions: {
     showSaveTemplateModal() {
@@ -157,6 +158,10 @@ export default Ember.Component.extend({
 
   isValidCustomServiceDef: Ember.computed.notEmpty('customServiceDef'),
 
+  isSecurityNotEnabled: Ember.computed('isSecurityEnabled', function () {
+    return this.get(`hosts.isSecurityEnabled`) === 'simple';
+  }),
+
   enableSaveOrDeployBtn: Ember.computed('isValidServiceDef', 'isValidCustomServiceDef', 'viewType', 'isLoading', 'isUserNameGiven', function() {
     if (this.get('isLoading')) {
       return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7802af6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 43b5065..53f9c44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -75,6 +75,15 @@ function getTimeLineV1URL(rmhost, isHttpsSchemeEnabled) {
   return url;
 }
 
+function getSecurityURL(rmhost) {
+  var url = window.location.protocol + '//' +
+    (ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + rmhost;
+
+  url += '/conf?name=hadoop.security.authentication';
+  Ember.Logger.log("Server security mode url is: " + url);
+  return url;
+}
+
 function updateConfigs(application) {
   var hostname = window.location.hostname;
   var rmhost = hostname + (window.location.port ? ':' + window.location.port: '') + skipTrailingSlash(window.location.pathname);
@@ -156,6 +165,29 @@ function updateConfigs(application) {
     Ember.Logger.log("Timeline V1 Address: " + ENV.hosts.timelineV1WebAddress);
     application.advanceReadiness();
   }
+
+  if(!ENV.hosts.isSecurityEnabled) {
+    var isSecurityEnabled = "";
+    $.ajax({
+      type: 'GET',
+      dataType: 'json',
+      async: true,
+      context: this,
+      url: getSecurityURL(rmhost),
+      success: function(data) {
+        isSecurityEnabled = data.property.value;
+        ENV.hosts.isSecurityEnabled = isSecurityEnabled;
+        Ember.Logger.log("Security mode is : " + isSecurityEnabled);
+        application.advanceReadiness();
+      },
+      error: function() {
+        application.advanceReadiness();
+      }
+    });
+  } else {
+    Ember.Logger.log("Security mode is: " + ENV.hosts.isSecurityEnabled);
+    application.advanceReadiness();
+  }
 }
 
 export function initialize( application ) {
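
The loader derives the security mode from the RM's /conf endpoint by reading hadoop.security.authentication, which is "simple" when security is off and "kerberos" when it is on. A minimal Java sketch of the same check on the server side, assuming a standard Hadoop Configuration is on hand:

    import org.apache.hadoop.conf.Configuration;

    public class SecurityModeCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "simple" (the default) means no authentication is configured.
        String mode = conf.get("hadoop.security.authentication", "simple");
        boolean securityEnabled = !"simple".equalsIgnoreCase(mode);
        System.out.println("Security enabled: " + securityEnabled);
      }
    }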

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7802af6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
index 738f5f1..a53451b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
@@ -82,5 +82,10 @@ export default Ember.Service.extend({
 
   dashWebAddress: Ember.computed(function () {
     return this.normalizeURL(this.get("env.app.hosts.dashWebAddress"));
+  }),
+
+  isSecurityEnabled: Ember.computed(function () {
+    console.log(this.get("env.app.hosts.isSecurityEnabled"));
+    return this.get("env.app.hosts.isSecurityEnabled");
   })
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7802af6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
index 720074e..2d18610 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
@@ -29,16 +29,18 @@
       </div>
     </div>
   {{/if}}
-  <div class="row">
-    <div class="col-md-4">
-      <div class="form-group shrink-height">
-        <label class="required">User Name for service</label>
-        <span class="glyphicon glyphicon-info-sign info-icon" data-info="userName"></span>
-        {{input type="text" class="form-control" placeholder="User Name" value=userName}}
+  {{#if isSecurityNotEnabled}}
+    <div class="row">
+      <div class="col-md-4">
+        <div class="form-group shrink-height">
+          <label class="required">User Name for service</label>
+          <span class="glyphicon glyphicon-info-sign info-icon" data-info="userName"></span>
+          {{input type="text" class="form-control" placeholder="User Name" value=userName}}
+        </div>
+        <br>
       </div>
-      <br>
     </div>
-  </div>
+  {{/if}}
   <div class="panel panel-default {{if isLoading 'loading-state'}}">
     {{#if isLoading}}
       <img src="assets/images/spinner.gif" alt="Loading...">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7802af6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
index ff95115..3d37796 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
@@ -22,7 +22,8 @@ module.exports = { // YARN UI App configurations
       timelineWebAddress: "localhost:8188",
       timelineV1WebAddress: "localhost:8188",
       rmWebAddress: "localhost:8088",
-      protocolScheme: "http:"
+      protocolScheme: "http:",
+      isSecurityEnabled: ""
     },
     namespaces: {
       timeline: 'ws/v1/applicationhistory',




[45/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css b/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css
new file mode 100644
index 0000000..39fae72
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/ozonedoc.css
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Base structure
+ */
+
+/* Move down content because we have a fixed navbar that is 50px tall */
+body {
+  padding-top: 50px;
+  font-size: 150%;
+}
+
+
+/*
+ * Global add-ons
+ */
+
+.sub-header {
+  padding-bottom: 10px;
+  border-bottom: 1px solid #eee;
+}
+
+/*
+ * Top navigation
+ * Hide default border to remove 1px line.
+ */
+.navbar-fixed-top {
+  border: 0;
+}
+
+/*
+ * Sidebar
+ */
+
+/* Hide for mobile, show later */
+.sidebar {
+  display: none;
+}
+@media (min-width: 768px) {
+  .sidebar {
+    position: fixed;
+    top: 51px;
+    bottom: 0;
+    left: 0;
+    z-index: 1000;
+    display: block;
+    padding: 20px;
+    overflow-x: hidden;
+    overflow-y: auto; /* Scrollable contents if viewport is shorter than content. */
+    background-color: #f5f5f5;
+    border-right: 1px solid #eee;
+  }
+}
+
+/* Sidebar navigation */
+.nav-sidebar {
+  margin-right: -21px; /* 20px padding + 1px border */
+  margin-bottom: 20px;
+  margin-left: -20px;
+}
+.nav-sidebar > li > a {
+  padding-right: 20px;
+  padding-left: 20px;
+}
+.nav-sidebar > li > ul > li > a {
+  padding-right: 40px;
+  padding-left: 40px;
+}
+.nav-sidebar  .active > a,
+.nav-sidebar  .active > a:hover,
+.nav-sidebar  .active > a:focus {
+  color: #fff;
+  background-color: #428bca;
+}
+
+
+/*
+ * Main content
+ */
+
+.main {
+  padding: 20px;
+}
+@media (min-width: 768px) {
+  .main {
+    padding-right: 40px;
+    padding-left: 40px;
+  }
+}
+.main .page-header {
+  margin-top: 0;
+}
+
+
+/*
+ * Placeholder dashboard ideas
+ */
+
+.placeholders {
+  margin-bottom: 30px;
+  text-align: center;
+}
+.placeholders h4 {
+  margin-bottom: 0;
+}
+.placeholder {
+  margin-bottom: 20px;
+}
+.placeholder img {
+  display: inline-block;
+  border-radius: 50%;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
new file mode 100644
index 0000000..b93a495
Binary files /dev/null and b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot differ




[37/50] [abbrv] hadoop git commit: HADOOP-15474. Rename properties introduced for <tags>. Contributed by Zsolt Venczel.

Posted by ar...@apache.org.
HADOOP-15474. Rename properties introduced for <tags>. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57c2feb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57c2feb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57c2feb0

Branch: refs/heads/HDDS-48
Commit: 57c2feb0d3ed0bb4f8642300433a35f5e28071c9
Parents: 5e88126
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Tue May 22 13:33:31 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Tue May 22 13:33:31 2018 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Configuration.java     | 18 +++++++++++++++---
 .../hadoop/fs/CommonConfigurationKeysPublic.java  | 15 +++++++++++++++
 .../src/main/resources/core-default.xml           | 11 ++++++++++-
 .../org/apache/hadoop/conf/TestConfiguration.java |  4 ++--
 4 files changed, 42 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c2feb0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 52f20b0..19bd5da 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -198,8 +198,8 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
  * <h4 id="Tags">Tags</h4>
  *
  * <p>Optionally we can tag related properties together by using tag
- * attributes. System tags are defined by hadoop.system.tags property. Users
- * can define there own custom tags in  hadoop.custom.tags property.
+ * attributes. System tags are defined by hadoop.tags.system property. Users
+ * can define there own custom tags in  hadoop.tags.custom property.
  *
  * <p>For example, we can tag existing property as:
  * <tt><pre>
@@ -3180,12 +3180,24 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   }
 
   /**
-   * Add tags defined in HADOOP_SYSTEM_TAGS, HADOOP_CUSTOM_TAGS.
+   * Add tags defined in HADOOP_TAGS_SYSTEM, HADOOP_TAGS_CUSTOM.
    * @param prop
    */
   public void addTags(Properties prop) {
     // Get all system tags
     try {
+      if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_SYSTEM)) {
+        String systemTags = prop.getProperty(CommonConfigurationKeys
+            .HADOOP_TAGS_SYSTEM);
+        Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
+      }
+      // Get all custom tags
+      if (prop.containsKey(CommonConfigurationKeys.HADOOP_TAGS_CUSTOM)) {
+        String customTags = prop.getProperty(CommonConfigurationKeys
+            .HADOOP_TAGS_CUSTOM);
+        Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
+      }
+
       if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)) {
         String systemTags = prop.getProperty(CommonConfigurationKeys
             .HADOOP_SYSTEM_TAGS);
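
Both the new hadoop.tags.system/hadoop.tags.custom keys and the deprecated hadoop.system.tags/hadoop.custom.tags keys are comma-separated lists that feed the same tag set, so either spelling keeps working. A minimal sketch of that loading pattern (hypothetical standalone class):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Properties;
    import java.util.Set;

    public class TagLoader {
      private static final Set<String> TAGS = new HashSet<>();

      // Reads every listed key that is present, so old configurations
      // keep contributing to the same tag set as new ones.
      static void addTags(Properties prop, String... keys) {
        for (String key : keys) {
          String value = prop.getProperty(key);
          if (value != null) {
            Arrays.stream(value.split(",")).map(String::trim).forEach(TAGS::add);
          }
        }
      }

      public static void main(String[] args) {
        Properties p = new Properties();
        p.setProperty("hadoop.tags.system", "YARN,HDFS");
        p.setProperty("hadoop.custom.tags", "MYCUSTOMTAG");
        addTags(p, "hadoop.tags.system", "hadoop.tags.custom",
            "hadoop.system.tags", "hadoop.custom.tags");
        System.out.println(TAGS); // prints all three tags (order unspecified)
      }
    }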

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c2feb0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 8cd753a..8837cfb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -881,7 +881,22 @@ public class CommonConfigurationKeysPublic {
           "credential$",
           "oauth.*token$",
           HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS);
+
+  /**
+   * @deprecated Please use
+   * {@link CommonConfigurationKeysPublic#HADOOP_TAGS_SYSTEM} instead
+   * See https://issues.apache.org/jira/browse/HADOOP-15474
+   */
   public static final String HADOOP_SYSTEM_TAGS = "hadoop.system.tags";
+
+  /**
+   * @deprecated Please use
+   * {@link CommonConfigurationKeysPublic#HADOOP_TAGS_CUSTOM} instead
+   * See https://issues.apache.org/jira/browse/HADOOP-15474
+   */
   public static final String HADOOP_CUSTOM_TAGS = "hadoop.custom.tags";
+
+  public static final String HADOOP_TAGS_SYSTEM = "hadoop.tags.system";
+  public static final String HADOOP_TAGS_CUSTOM = "hadoop.tags.custom";
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c2feb0/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 7ba23d4..fad2985 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -3035,7 +3035,16 @@
   <property>
     <name>hadoop.system.tags</name>
     <value>YARN,HDFS,NAMENODE,DATANODE,REQUIRED,SECURITY,KERBEROS,PERFORMANCE,CLIENT
-      ,SERVER,DEBUG,DEPRICATED,COMMON,OPTIONAL</value>
+      ,SERVER,DEBUG,DEPRECATED,COMMON,OPTIONAL</value>
+    <description>
+      Deprecated. Please use hadoop.tags.system instead.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.tags.system</name>
+    <value>YARN,HDFS,NAMENODE,DATANODE,REQUIRED,SECURITY,KERBEROS,PERFORMANCE,CLIENT
+      ,SERVER,DEBUG,DEPRECATED,COMMON,OPTIONAL</value>
     <description>
       System tags to group related properties together.
     </description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57c2feb0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 33a9880..e865bf1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -2361,8 +2361,8 @@ public class TestConfiguration {
     try{
       out = new BufferedWriter(new FileWriter(CONFIG_CORE));
       startConfig();
-      appendProperty("hadoop.system.tags", "YARN,HDFS,NAMENODE");
-      appendProperty("hadoop.custom.tags", "MYCUSTOMTAG");
+      appendProperty("hadoop.tags.system", "YARN,HDFS,NAMENODE");
+      appendProperty("hadoop.tags.custom", "MYCUSTOMTAG");
       appendPropertyByTag("dfs.cblock.trace.io", "false", "YARN");
       appendPropertyByTag("dfs.replication", "1", "HDFS");
       appendPropertyByTag("dfs.namenode.logging.level", "INFO", "NAMENODE");




[17/50] [abbrv] hadoop git commit: YARN-7900. [AMRMProxy] AMRMClientRelayer for stateful FederationInterceptor. (Botong Huang via asuresh)

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
index e518b90..38181e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.SerializedException;
 import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
@@ -78,7 +79,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;




[40/50] [abbrv] hadoop git commit: YARN-7960. Added security flag no-new-privileges for YARN Docker integration. Contributed by Eric Badger

Posted by ar...@apache.org.
YARN-7960.  Added security flag no-new-privileges for YARN Docker integration.
            Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6176d2b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6176d2b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6176d2b3

Branch: refs/heads/HDDS-48
Commit: 6176d2b35c85715aae93526236c29540f71ecac8
Parents: bcc8e76
Author: Eric Yang <ey...@apache.org>
Authored: Tue May 22 13:44:58 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Tue May 22 13:44:58 2018 -0400

----------------------------------------------------------------------
 .../hadoop-yarn/conf/container-executor.cfg     |  1 +
 .../container-executor/impl/utils/docker-util.c | 12 +++
 .../test/utils/test_docker_util.cc              | 90 ++++++++++++++++++++
 .../src/site/markdown/DockerContainers.md       |  1 +
 4 files changed, 104 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6176d2b3/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index 36676b0..d19874f 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -15,6 +15,7 @@ feature.tc.enabled=false
 #  docker.allowed.rw-mounts=## comma seperate volumes that can be mounted as read-write, add the yarn local and log dirs to this list to run Hadoop jobs
 #  docker.privileged-containers.enabled=false
 #  docker.allowed.volume-drivers=## comma seperated list of allowed volume-drivers
+#  docker.no-new-privileges.enabled=## enable/disable the no-new-privileges flag for docker run. Set to "true" to enable, disabled by default
 
 # The configs below deal with settings for FPGA resource
 #[fpga]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6176d2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 099e5b5..d34a5b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -1374,6 +1374,18 @@ int get_docker_run_command(const char *command_file, const struct configuration
       reset_args(args);
       return BUFFER_TOO_SMALL;
     }
+    char *no_new_privileges_enabled =
+        get_configuration_value("docker.no-new-privileges.enabled",
+        CONTAINER_EXECUTOR_CFG_DOCKER_SECTION, conf);
+    if (no_new_privileges_enabled != NULL &&
+        strcasecmp(no_new_privileges_enabled, "True") == 0) {
+      ret = add_to_args(args, "--security-opt=no-new-privileges");
+      if (ret != 0) {
+        reset_args(args);
+        return BUFFER_TOO_SMALL;
+      }
+    }
+    free(no_new_privileges_enabled);
   }
   free(privileged);
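
The native change above appends --security-opt=no-new-privileges to the docker run invocation only when docker.no-new-privileges.enabled is set to "true" (strcasecmp makes the check case-insensitive). A small Java sketch of the same conditional argument building (hypothetical helper; the config key is the one introduced by this patch):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Properties;

    public class DockerRunArgs {
      static List<String> runArgs(Properties conf, String container, String user) {
        List<String> args = new ArrayList<>();
        args.add("run");
        args.add("--name=" + container);
        args.add("--user=" + user);
        // Disabled by default; enabled only by an explicit "true".
        if ("true".equalsIgnoreCase(
            conf.getProperty("docker.no-new-privileges.enabled", "false"))) {
          args.add("--security-opt=no-new-privileges");
        }
        return args;
      }
    }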
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6176d2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index e18bf63..613755c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -1545,4 +1545,94 @@ namespace ContainerExecutor {
 
     run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_volume_command);
   }
+
+  TEST_F(TestDockerUtil, test_docker_no_new_privileges) {
+
+    std::string container_executor_contents[] = {"[docker]\n"
+                                                     "  docker.privileged-containers.registries=hadoop\n"
+                                                     "  docker.privileged-containers.enabled=false\n"
+                                                     "  docker.no-new-privileges.enabled=true",
+                                                 "[docker]\n"
+                                                     "  docker.privileged-containers.registries=hadoop\n"
+                                                     "  docker.privileged-containers.enabled=true\n"
+                                                     "  docker.no-new-privileges.enabled=true",
+                                                 "[docker]\n"
+                                                     "  docker.privileged-containers.registries=hadoop\n"
+                                                     "  docker.privileged-containers.enabled=true\n"
+                                                     "  docker.no-new-privileges.enabled=true",
+                                                 "[docker]\n"
+                                                     "  docker.privileged-containers.registries=hadoop\n"
+                                                     "  docker.privileged-containers.enabled=false\n"
+                                                     "  docker.no-new-privileges.enabled=false",
+                                                 "[docker]\n"
+                                                     "  docker.privileged-containers.registries=hadoop\n"
+                                                     "  docker.privileged-containers.enabled=true\n"
+                                                     "  docker.no-new-privileges.enabled=false"};
+    for (int i = 0; i < 2; ++i) {
+      write_file(container_executor_cfg_file, container_executor_contents[i]);
+      int ret = read_config(container_executor_cfg_file.c_str(), &container_executor_cfg);
+      if (ret != 0) {
+        FAIL();
+      }
+      ret = create_ce_file();
+      if (ret != 0) {
+        std::cerr << "Could not create ce file, skipping test" << std::endl;
+        return;
+      }
+
+      std::vector<std::pair<std::string, std::string> > file_cmd_vec;
+      file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
+          "[docker-command-execution]\n  docker-command=run\n name=container_e1_12312_11111_02_000001\n"
+          "image=hadoop/docker-image\n  user=nobody",
+          "run --name=container_e1_12312_11111_02_000001 --user=nobody --security-opt=no-new-privileges "
+          "--cap-drop=ALL hadoop/docker-image"));
+
+      std::vector<std::pair<std::string, int> > bad_file_cmd_vec;
+      run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_run_command);
+    }
+
+    for (int i = 2; i < 3; ++i) {
+      write_file(container_executor_cfg_file, container_executor_contents[i]);
+      int ret = read_config(container_executor_cfg_file.c_str(), &container_executor_cfg);
+      if (ret != 0) {
+        FAIL();
+      }
+      ret = create_ce_file();
+      if (ret != 0) {
+        std::cerr << "Could not create ce file, skipping test" << std::endl;
+        return;
+      }
+
+      std::vector<std::pair<std::string, std::string> > file_cmd_vec;
+      file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
+          "[docker-command-execution]\n  docker-command=run\n privileged=true\n"
+          "name=container_e1_12312_11111_02_000001\n  image=hadoop/docker-image\n  user=root",
+          "run --name=container_e1_12312_11111_02_000001 --privileged --cap-drop=ALL hadoop/docker-image"));
+
+      std::vector<std::pair<std::string, int> > bad_file_cmd_vec;
+      run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_run_command);
+    }
+
+    for (int i = 3; i < 5; ++i) {
+      write_file(container_executor_cfg_file, container_executor_contents[i]);
+      int ret = read_config(container_executor_cfg_file.c_str(), &container_executor_cfg);
+      if (ret != 0) {
+        FAIL();
+      }
+      ret = create_ce_file();
+      if (ret != 0) {
+        std::cerr << "Could not create ce file, skipping test" << std::endl;
+        return;
+      }
+
+      std::vector<std::pair<std::string, std::string> > file_cmd_vec;
+      file_cmd_vec.push_back(std::make_pair<std::string, std::string>(
+          "[docker-command-execution]\n  docker-command=run\n name=container_e1_12312_11111_02_000001\n"
+          "image=hadoop/docker-image\n  user=nobody",
+          "run --name=container_e1_12312_11111_02_000001 --user=nobody --cap-drop=ALL hadoop/docker-image"));
+
+      std::vector<std::pair<std::string, int> > bad_file_cmd_vec;
+      run_docker_command_test(file_cmd_vec, bad_file_cmd_vec, get_docker_run_command);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6176d2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 3c39291..0f49a06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -208,6 +208,7 @@ are allowed. It contains the following properties:
 | `docker.privileged-containers.enabled` | Set to "true" or "false" to enable or disable launching privileged containers. Default value is "false". |
 | `docker.privileged-containers.registries` | Comma separated list of trusted docker registries for running trusted privileged docker containers.  By default, no registries are defined. |
 | `docker.inspect.max.retries` | Integer value to check docker container readiness.  Each inspection is set with 3 seconds delay.  Default value of 10 will wait 30 seconds for docker container to become ready before marked as container failed. |
+| `docker.no-new-privileges.enabled` | Enable/disable the no-new-privileges flag for docker run. Set to "true" to enable, disabled by default. |
 
 Please note that if you wish to run Docker containers that require access to the YARN local directories, you must add them to the docker.allowed.rw-mounts list.
 




[28/50] [abbrv] hadoop git commit: YARN-7530. Refactored YARN service API project location. Contributed by Chandni Singh

Posted by ar...@apache.org.
YARN-7530.  Refactored YARN service API project location.
            Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a23ff8d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a23ff8d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a23ff8d8

Branch: refs/heads/HDDS-48
Commit: a23ff8d88001ad8e4ac4c36fc1f7691d193dc1d0
Parents: 89f5911
Author: Eric Yang <ey...@apache.org>
Authored: Fri May 18 17:29:10 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Fri May 18 17:29:10 2018 -0400

----------------------------------------------------------------------
 .../resources/assemblies/hadoop-yarn-dist.xml   |   2 +-
 .../dev-support/findbugs-exclude.xml            |  20 -
 .../hadoop-yarn-services-api/pom.xml            | 144 ----
 .../yarn/service/client/ApiServiceClient.java   | 598 --------------
 .../client/SystemServiceManagerImpl.java        | 391 ---------
 .../yarn/service/client/package-info.java       |  28 -
 .../hadoop/yarn/service/webapp/ApiServer.java   | 818 -------------------
 .../yarn/service/webapp/ApiServerWebApp.java    | 161 ----
 .../yarn/service/webapp/package-info.java       |  28 -
 .../definition/YARN-Services-Examples.md        | 444 ----------
 ...RN-Simplified-V1-API-Layer-For-Services.yaml | 594 --------------
 .../src/main/resources/log4j-server.properties  |  76 --
 .../src/main/resources/webapps/api-server/app   |  16 -
 .../src/main/webapp/WEB-INF/web.xml             |  36 -
 .../hadoop/yarn/service/ServiceClientTest.java  | 210 -----
 .../hadoop/yarn/service/TestApiServer.java      | 623 --------------
 .../service/client/TestApiServiceClient.java    | 314 -------
 .../client/TestSystemServiceManagerImpl.java    | 182 -----
 .../src/test/resources/example-app.json         |  16 -
 .../src/test/resources/log4j.properties         |  19 -
 .../resources/system-services/bad/bad.yarnfile  |  16 -
 .../sync/user1/example-app1.yarnfile            |  16 -
 .../sync/user1/example-app2.yarnfile            |  16 -
 .../sync/user1/example-app3.json                |  16 -
 .../sync/user2/example-app1.yarnfile            |  16 -
 .../sync/user2/example-app2.yarnfile            |  16 -
 .../dev-support/findbugs-exclude.xml            |  20 +
 .../hadoop-yarn-services-api/pom.xml            | 144 ++++
 .../yarn/service/client/ApiServiceClient.java   | 598 ++++++++++++++
 .../client/SystemServiceManagerImpl.java        | 391 +++++++++
 .../yarn/service/client/package-info.java       |  28 +
 .../hadoop/yarn/service/webapp/ApiServer.java   | 818 +++++++++++++++++++
 .../yarn/service/webapp/ApiServerWebApp.java    | 161 ++++
 .../yarn/service/webapp/package-info.java       |  28 +
 .../definition/YARN-Services-Examples.md        | 444 ++++++++++
 ...RN-Simplified-V1-API-Layer-For-Services.yaml | 594 ++++++++++++++
 .../src/main/resources/log4j-server.properties  |  76 ++
 .../src/main/resources/webapps/api-server/app   |  16 +
 .../src/main/webapp/WEB-INF/web.xml             |  36 +
 .../hadoop/yarn/service/ServiceClientTest.java  | 210 +++++
 .../hadoop/yarn/service/TestApiServer.java      | 623 ++++++++++++++
 .../service/client/TestApiServiceClient.java    | 314 +++++++
 .../client/TestSystemServiceManagerImpl.java    | 182 +++++
 .../src/test/resources/example-app.json         |  16 +
 .../src/test/resources/log4j.properties         |  19 +
 .../resources/system-services/bad/bad.yarnfile  |  16 +
 .../sync/user1/example-app1.yarnfile            |  16 +
 .../sync/user1/example-app2.yarnfile            |  16 +
 .../sync/user1/example-app3.json                |  16 +
 .../sync/user2/example-app1.yarnfile            |  16 +
 .../sync/user2/example-app2.yarnfile            |  16 +
 .../hadoop-yarn-services/pom.xml                |   1 +
 .../hadoop-yarn-applications/pom.xml            |   1 -
 53 files changed, 4816 insertions(+), 4816 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
----------------------------------------------------------------------
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 382c967..a2ea08c 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -105,7 +105,7 @@
       </includes>
     </fileSet>
     <fileSet>
-      <directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/target</directory>
+      <directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/target</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
       <includes>
         <include>*-sources.jar</include>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
deleted file mode 100644
index b89146a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-
-</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
deleted file mode 100644
index 354c9b5..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
+++ /dev/null
@@ -1,144 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-yarn-applications</artifactId>
-    <version>3.2.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-yarn-services-api</artifactId>
-  <name>Apache Hadoop YARN Services API</name>
-  <packaging>jar</packaging>
-  <description>Hadoop YARN REST APIs for services</description>
-
-  <build>
-
-    <!-- resources are filtered for dynamic updates. This gets build info in-->
-    <resources>
-      <resource>
-        <directory>src/main/resources</directory>
-        <filtering>true</filtering>
-      </resource>
-      <resource>
-        <directory>src/main/scripts/</directory>
-        <filtering>true</filtering>
-      </resource>
-    </resources>
-
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <!-- The configuration of the plugin -->
-        <configuration>
-          <!-- Configuration of the archiver -->
-          <archive>
-            <manifestEntries>
-              <mode>development</mode>
-              <url>${project.url}</url>
-            </manifestEntries>
-            <!-- Manifest specific configuration -->
-            <manifest>
-            </manifest>
-          </archive>
-        </configuration>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>**/*.json</exclude>
-            <exclude>**/*.yarnfile</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <reporting>
-  </reporting>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-services-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-webapp</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject</groupId>
-      <artifactId>guice</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.ws.rs</groupId>
-      <artifactId>jsr311-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <!-- ======================================================== -->
-    <!-- Test dependencies -->
-    <!-- ======================================================== -->
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
deleted file mode 100644
index a8e2f51..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.service.client;
-
-import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser;
-
-import java.io.File;
-import java.io.IOException;
-import java.text.MessageFormat;
-import java.util.List;
-import java.util.Map;
-
-import javax.ws.rs.core.MediaType;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.client.api.AppAdminClient;
-import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.service.api.records.Component;
-import org.apache.hadoop.yarn.service.api.records.ComponentState;
-import org.apache.hadoop.yarn.service.api.records.Container;
-import org.apache.hadoop.yarn.service.api.records.ContainerState;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.api.records.ServiceState;
-import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
-import org.apache.hadoop.yarn.service.conf.RestApiConstants;
-import org.apache.hadoop.yarn.service.utils.JsonSerDeser;
-import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
-import org.apache.hadoop.yarn.util.RMHAUtils;
-import org.codehaus.jackson.map.PropertyNamingStrategy;
-import org.eclipse.jetty.util.UrlEncoded;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.sun.jersey.api.client.Client;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.client.WebResource.Builder;
-import com.sun.jersey.api.client.config.ClientConfig;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-
-import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.*;
-
-/**
- * The REST API client for users to manage services on YARN.
- */
-public class ApiServiceClient extends AppAdminClient {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ApiServiceClient.class);
-  protected YarnClient yarnClient;
-
-  @Override protected void serviceInit(Configuration configuration)
-      throws Exception {
-    yarnClient = YarnClient.createYarnClient();
-    addService(yarnClient);
-    super.serviceInit(configuration);
-  }
-
-  /**
-   * Calculate the Resource Manager address based on the working REST API.
-   */
-  private String getRMWebAddress() {
-    Configuration conf = getConfig();
-    String scheme = "http://";
-    String path = "/app/v1/services/version";
-    String rmAddress = conf
-        .get("yarn.resourcemanager.webapp.address");
-    if (YarnConfiguration.useHttps(conf)) {
-      scheme = "https://";
-      rmAddress = conf
-          .get("yarn.resourcemanager.webapp.https.address");
-    }
-    boolean useKerberos = UserGroupInformation.isSecurityEnabled();
-    List<String> rmServers = RMHAUtils
-        .getRMHAWebappAddresses(new YarnConfiguration(conf));
-    for (String host : rmServers) {
-      try {
-        Client client = Client.create();
-        StringBuilder sb = new StringBuilder();
-        sb.append(scheme);
-        sb.append(host);
-        sb.append(path);
-        if (!useKerberos) {
-          try {
-            String username = UserGroupInformation.getCurrentUser().getShortUserName();
-            sb.append("?user.name=");
-            sb.append(username);
-          } catch (IOException e) {
-            LOG.debug("Fail to resolve username: {}", e);
-          }
-        }
-        WebResource webResource = client
-            .resource(sb.toString());
-        if (useKerberos) {
-          AuthenticatedURL.Token token = new AuthenticatedURL.Token();
-          webResource.header("WWW-Authenticate", token);
-        }
-        ClientResponse test = webResource.get(ClientResponse.class);
-        if (test.getStatus() == 200) {
-          rmAddress = host;
-          break;
-        }
-      } catch (Exception e) {
-        LOG.debug("Fail to connect to: "+host, e);
-      }
-    }
-    return scheme+rmAddress;
-  }
-
-  /**
-   * Compute the active Resource Manager's API service location.
-   *
-   * @param appName - YARN service name
-   * @return URI to API Service
-   * @throws IOException
-   */
-  private String getServicePath(String appName) throws IOException {
-    String url = getRMWebAddress();
-    StringBuilder api = new StringBuilder();
-    api.append(url);
-    api.append("/app/v1/services");
-    if (appName != null) {
-      api.append("/");
-      api.append(appName);
-    }
-    Configuration conf = getConfig();
-    if (conf.get("hadoop.http.authentication.type").equalsIgnoreCase("simple")) {
-      api.append("?user.name=" + UrlEncoded
-          .encodeString(System.getProperty("user.name")));
-    }
-    return api.toString();
-  }
-
-  private String getInstancesPath(String appName) throws IOException {
-    Preconditions.checkNotNull(appName);
-    String url = getRMWebAddress();
-    StringBuilder api = new StringBuilder();
-    api.append(url);
-    api.append("/app/v1/services/").append(appName).append("/")
-        .append(RestApiConstants.COMP_INSTANCES);
-    Configuration conf = getConfig();
-    if (conf.get("hadoop.http.authentication.type").equalsIgnoreCase(
-        "simple")) {
-      api.append("?user.name=" + UrlEncoded
-          .encodeString(System.getProperty("user.name")));
-    }
-    return api.toString();
-  }
-
-  private String getComponentsPath(String appName) throws IOException {
-    Preconditions.checkNotNull(appName);
-    String url = getRMWebAddress();
-    StringBuilder api = new StringBuilder();
-    api.append(url);
-    api.append("/app/v1/services/").append(appName).append("/")
-        .append(RestApiConstants.COMPONENTS);
-    Configuration conf = getConfig();
-    if (conf.get("hadoop.http.authentication.type").equalsIgnoreCase(
-        "simple")) {
-      api.append("?user.name=" + UrlEncoded
-          .encodeString(System.getProperty("user.name")));
-    }
-    return api.toString();
-  }
-
-  private Builder getApiClient() throws IOException {
-    return getApiClient(getServicePath(null));
-  }
-
-  /**
-   * Set up an API service web request.
-   *
-   * @param requestPath the full request URL
-   * @return a web request builder configured for JSON
-   * @throws IOException
-   */
-  private Builder getApiClient(String requestPath)
-      throws IOException {
-    Client client = Client.create(getClientConfig());
-    Configuration conf = getConfig();
-    client.setChunkedEncodingSize(null);
-    Builder builder = client
-        .resource(requestPath).type(MediaType.APPLICATION_JSON);
-    if (conf.get("hadoop.http.authentication.type").equals("kerberos")) {
-      AuthenticatedURL.Token token = new AuthenticatedURL.Token();
-      builder.header("WWW-Authenticate", token);
-    }
-    return builder
-        .accept("application/json;charset=utf-8");
-  }
-
-  private ClientConfig getClientConfig() {
-    ClientConfig config = new DefaultClientConfig();
-    config.getProperties().put(
-        ClientConfig.PROPERTY_CHUNKED_ENCODING_SIZE, 0);
-    config.getProperties().put(
-        ClientConfig.PROPERTY_BUFFER_RESPONSE_ENTITY_ON_EXCEPTION, true);
-    return config;
-  }
-
-  private int processResponse(ClientResponse response) {
-    response.bufferEntity();
-    String output;
-    if (response.getStatus() == 401) {
-      LOG.error("Authentication required");
-      return EXIT_EXCEPTION_THROWN;
-    }
-    if (response.getStatus() == 503) {
-      LOG.error("YARN Service is unavailable or disabled.");
-      return EXIT_EXCEPTION_THROWN;
-    }
-    try {
-      ServiceStatus ss = response.getEntity(ServiceStatus.class);
-      output = ss.getDiagnostics();
-    } catch (Throwable t) {
-      output = response.getEntity(String.class);
-    }
-    if (output == null) {
-      output = response.getEntity(String.class);
-    }
-    if (response.getStatus() <= 299) {
-      LOG.info(output);
-      return EXIT_SUCCESS;
-    } else {
-      LOG.error(output);
-      return EXIT_EXCEPTION_THROWN;
-    }
-  }
-
-  /**
-   * Utility method to load a Service definition (JSON) from disk or from
-   * the YARN examples directory.
-   *
-   * @param fileName - path to yarnfile
-   * @param serviceName - YARN Service Name
-   * @param lifetime - application lifetime
-   * @param queue - Queue to submit application
-   * @return
-   * @throws IOException
-   * @throws YarnException
-   */
-  public Service loadAppJsonFromLocalFS(String fileName, String serviceName,
-      Long lifetime, String queue) throws IOException, YarnException {
-    File file = new File(fileName);
-    if (!file.exists() && fileName.equals(file.getName())) {
-      String examplesDirStr = System.getenv("YARN_SERVICE_EXAMPLES_DIR");
-      String[] examplesDirs;
-      if (examplesDirStr == null) {
-        String yarnHome = System
-            .getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key());
-        examplesDirs = new String[]{
-            yarnHome + "/share/hadoop/yarn/yarn-service-examples",
-            yarnHome + "/yarn-service-examples"
-        };
-      } else {
-        examplesDirs = StringUtils.split(examplesDirStr, ":");
-      }
-      for (String dir : examplesDirs) {
-        file = new File(MessageFormat.format("{0}/{1}/{2}.json",
-            dir, fileName, fileName));
-        if (file.exists()) {
-          break;
-        }
-        // Then look for secondary location.
-        file = new File(MessageFormat.format("{0}/{1}.json",
-            dir, fileName));
-        if (file.exists()) {
-          break;
-        }
-      }
-    }
-    if (!file.exists()) {
-      throw new YarnException("File or example could not be found: " +
-          fileName);
-    }
-    Path filePath = new Path(file.getAbsolutePath());
-    LOG.info("Loading service definition from local FS: " + filePath);
-    Service service = jsonSerDeser
-        .load(FileSystem.getLocal(getConfig()), filePath);
-    if (!StringUtils.isEmpty(serviceName)) {
-      service.setName(serviceName);
-    }
-    if (lifetime != null && lifetime > 0) {
-      service.setLifetime(lifetime);
-    }
-    if (!StringUtils.isEmpty(queue)) {
-      service.setQueue(queue);
-    }
-    return service;
-  }
-
-  /**
-   * Launch YARN service application.
-   *
-   * @param fileName - path to yarnfile
-   * @param appName - YARN Service Name
-   * @param lifetime - application lifetime
-   * @param queue - Queue to submit application
-   */
-  @Override
-  public int actionLaunch(String fileName, String appName, Long lifetime,
-      String queue) throws IOException, YarnException {
-    int result = EXIT_SUCCESS;
-    try {
-      Service service =
-          loadAppJsonFromLocalFS(fileName, appName, lifetime, queue);
-      String buffer = jsonSerDeser.toJson(service);
-      ClientResponse response = getApiClient()
-          .post(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Fail to launch application: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  /**
-   * Stop YARN service application.
-   *
-   * @param appName - YARN Service Name
-   */
-  @Override
-  public int actionStop(String appName) throws IOException, YarnException {
-    int result = EXIT_SUCCESS;
-    try {
-      Service service = new Service();
-      service.setName(appName);
-      service.setState(ServiceState.STOPPED);
-      String buffer = jsonSerDeser.toJson(service);
-      ClientResponse response = getApiClient(getServicePath(appName))
-          .put(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Fail to stop application: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  /**
-   * Start YARN service application.
-   *
-   * @param appName - YARN Service Name
-   */
-  @Override
-  public int actionStart(String appName) throws IOException, YarnException {
-    int result = EXIT_SUCCESS;
-    try {
-      Service service = new Service();
-      service.setName(appName);
-      service.setState(ServiceState.STARTED);
-      String buffer = jsonSerDeser.toJson(service);
-      ClientResponse response = getApiClient(getServicePath(appName))
-          .put(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Fail to start application: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  /**
-   * Save Service configuration.
-   *
-   * @param fileName - path to Yarnfile
-   * @param appName - YARN Service Name
-   * @param lifetime - container life time
-   * @param queue - Queue to submit the application
-   */
-  @Override
-  public int actionSave(String fileName, String appName, Long lifetime,
-      String queue) throws IOException, YarnException {
-    int result = EXIT_SUCCESS;
-    try {
-      Service service =
-          loadAppJsonFromLocalFS(fileName, appName, lifetime, queue);
-      service.setState(ServiceState.STOPPED);
-      String buffer = jsonSerDeser.toJson(service);
-      ClientResponse response = getApiClient()
-          .post(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Fail to save application: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  /**
-   * Decommission a YARN service.
-   *
-   * @param appName - YARN Service Name
-   */
-  @Override
-  public int actionDestroy(String appName) throws IOException, YarnException {
-    int result = EXIT_SUCCESS;
-    try {
-      ClientResponse response = getApiClient(getServicePath(appName))
-          .delete(ClientResponse.class);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Fail to destroy application: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  /**
-   * Change number of containers associated with a service.
-   *
-   * @param appName - YARN Service Name
-   * @param componentCounts - list of components and desired container count
-   */
-  @Override
-  public int actionFlex(String appName, Map<String, String> componentCounts)
-      throws IOException, YarnException {
-    int result = EXIT_SUCCESS;
-    try {
-      Service service = new Service();
-      service.setName(appName);
-      service.setState(ServiceState.FLEX);
-      for (Map.Entry<String, String> entry : componentCounts.entrySet()) {
-        Component component = new Component();
-        component.setName(entry.getKey());
-        Long numberOfContainers = Long.parseLong(entry.getValue());
-        component.setNumberOfContainers(numberOfContainers);
-        service.addComponent(component);
-      }
-      String buffer = jsonSerDeser.toJson(service);
-      ClientResponse response = getApiClient(getServicePath(appName))
-          .put(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Fail to flex application: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  @Override
-  public int enableFastLaunch(String destinationFolder) throws IOException, YarnException {
-    ServiceClient sc = new ServiceClient();
-    sc.init(getConfig());
-    sc.start();
-    int result = sc.enableFastLaunch(destinationFolder);
-    sc.close();
-    return result;
-  }
-
-  /**
-   * Retrieve Service Status through REST API.
-   *
-   * @param appIdOrName - YARN application ID or application name
-   * @return Status output
-   */
-  @Override
-  public String getStatusString(String appIdOrName) throws IOException,
-      YarnException {
-    String output = "";
-    String appName;
-    try {
-      ApplicationId appId = ApplicationId.fromString(appIdOrName);
-      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
-      appName = appReport.getName();
-    } catch (IllegalArgumentException e) {
-      // Not in application ID format; may be an application name.
-      appName = appIdOrName;
-      ServiceApiUtil.validateNameFormat(appName, getConfig());
-    }
-    try {
-      ClientResponse response = getApiClient(getServicePath(appName))
-          .get(ClientResponse.class);
-      if (response.getStatus() == 404) {
-        StringBuilder sb = new StringBuilder();
-        sb.append(" Service ");
-        sb.append(appName);
-        sb.append(" not found");
-        return sb.toString();
-      }
-      if (response.getStatus() != 200) {
-        StringBuilder sb = new StringBuilder();
-        sb.append(appName);
-        sb.append(" Failed : HTTP error code : ");
-        sb.append(response.getStatus());
-        return sb.toString();
-      }
-      output = response.getEntity(String.class);
-    } catch (Exception e) {
-      LOG.error("Fail to check application status: ", e);
-    }
-    return output;
-  }
-
-  @Override
-  public int initiateUpgrade(String appName,
-      String fileName, boolean autoFinalize) throws IOException, YarnException {
-    int result;
-    try {
-      Service service =
-          loadAppJsonFromLocalFS(fileName, appName, null, null);
-      if (autoFinalize) {
-        service.setState(ServiceState.UPGRADING_AUTO_FINALIZE);
-      } else {
-        service.setState(ServiceState.UPGRADING);
-      }
-      String buffer = jsonSerDeser.toJson(service);
-      ClientResponse response = getApiClient(getServicePath(appName))
-          .put(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Failed to upgrade application: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  @Override
-  public int actionUpgradeInstances(String appName, List<String> compInstances)
-      throws IOException, YarnException {
-    int result;
-    Container[] toUpgrade = new Container[compInstances.size()];
-    try {
-      int idx = 0;
-      for (String instanceName : compInstances) {
-        Container container = new Container();
-        container.setComponentInstanceName(instanceName);
-        container.setState(ContainerState.UPGRADING);
-        toUpgrade[idx++] = container;
-      }
-      String buffer = CONTAINER_JSON_SERDE.toJson(toUpgrade);
-      ClientResponse response = getApiClient(getInstancesPath(appName))
-          .put(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Failed to upgrade component instance: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  @Override
-  public int actionUpgradeComponents(String appName, List<String> components)
-      throws IOException, YarnException {
-    int result;
-    Component[] toUpgrade = new Component[components.size()];
-    try {
-      int idx = 0;
-      for (String compName : components) {
-        Component component = new Component();
-        component.setName(compName);
-        component.setState(ComponentState.UPGRADING);
-        toUpgrade[idx++] = component;
-      }
-      String buffer = COMP_JSON_SERDE.toJson(toUpgrade);
-      ClientResponse response = getApiClient(getComponentsPath(appName))
-          .put(ClientResponse.class, buffer);
-      result = processResponse(response);
-    } catch (Exception e) {
-      LOG.error("Failed to upgrade components: ", e);
-      result = EXIT_EXCEPTION_THROWN;
-    }
-    return result;
-  }
-
-  private static final JsonSerDeser<Container[]> CONTAINER_JSON_SERDE =
-      new JsonSerDeser<>(Container[].class,
-          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
-
-  private static final JsonSerDeser<Component[]> COMP_JSON_SERDE =
-      new JsonSerDeser<>(Component[].class,
-          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
-}

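For orientation, note how thin the deleted client actually is: every action serializes a Service record and sends it to the service's REST path, then funnels the response through processResponse(). A minimal, hypothetical sketch of the stop flow (the RM address, service name, and user below are made-up values, simple authentication is assumed, and this code is an illustration rather than part of the commit):

    import javax.ws.rs.core.MediaType;
    import com.sun.jersey.api.client.Client;
    import com.sun.jersey.api.client.ClientResponse;
    import org.apache.hadoop.yarn.service.api.records.Service;
    import org.apache.hadoop.yarn.service.api.records.ServiceState;
    import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser;

    public class StopServiceSketch {
      public static void main(String[] args) throws Exception {
        Service service = new Service();
        service.setName("sleeper");              // hypothetical service name
        service.setState(ServiceState.STOPPED);  // a PUT with state=STOPPED stops it
        String json = jsonSerDeser.toJson(service);
        ClientResponse response = Client.create()
            .resource("http://rm-host:8088/app/v1/services/sleeper?user.name=alice")
            .type(MediaType.APPLICATION_JSON)
            .put(ClientResponse.class, json);
        System.out.println("HTTP " + response.getStatus());
      }
    }

The same PUT with state=STARTED restarts the service, and a DELETE on the same path destroys it, which is exactly the shape of actionStart() and actionDestroy() above.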
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
deleted file mode 100644
index f9cfa92..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.service.client;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.server.service.SystemServiceManager;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.api.records.ServiceState;
-import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser;
-
-/**
- * SystemServiceManager implementation.
- * Scans the configured system service path.
- *
- * The service path structure is as follows:
- * SYSTEM_SERVICE_DIR_PATH
- * |---- sync
- * |     |--- user1
- * |     |    |---- service1.yarnfile
- * |     |    |---- service2.yarnfile
- * |     |--- user2
- * |     |    |---- service1.yarnfile
- * |     |    ....
- * |     |
- * |---- async
- * |     |--- user3
- * |     |    |---- service1.yarnfile
- * |     |    |---- service2.yarnfile
- * |     |--- user4
- * |     |    |---- service1.yarnfile
- * |     |    ....
- * |     |
- *
- * sync: These services are launched synchronously at service start time;
- *       this is a blocking service start.
- * async: These services are launched in a separate thread, without any delay,
- *       after service start; this is a non-blocking service start.
- */
-public class SystemServiceManagerImpl extends AbstractService
-    implements SystemServiceManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SystemServiceManagerImpl.class);
-
-  private static final String YARN_FILE_SUFFIX = ".yarnfile";
-  private static final String SYNC = "sync";
-  private static final String ASYNC = "async";
-
-  private FileSystem fs;
-  private Path systemServiceDir;
-  private AtomicBoolean stopExecutors = new AtomicBoolean(false);
-  private Map<String, Set<Service>> syncUserServices = new HashMap<>();
-  private Map<String, Set<Service>> asyncUserServices = new HashMap<>();
-  private UserGroupInformation loginUGI;
-  private Thread serviceLauncher;
-
-  @VisibleForTesting
-  private int badFileNameExtensionSkipCounter;
-  @VisibleForTesting
-  private Map<String, Integer> ignoredUserServices =
-      new HashMap<>();
-  @VisibleForTesting
-  private int badDirSkipCounter;
-
-  public SystemServiceManagerImpl() {
-    super(SystemServiceManagerImpl.class.getName());
-  }
-
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    String dirPath =
-        conf.get(YarnServiceConf.YARN_SERVICES_SYSTEM_SERVICE_DIRECTORY);
-    if (dirPath != null) {
-      systemServiceDir = new Path(dirPath);
-      LOG.info("System Service Directory is configured to {}",
-          systemServiceDir);
-      fs = systemServiceDir.getFileSystem(conf);
-      this.loginUGI = UserGroupInformation.isSecurityEnabled() ?
-          UserGroupInformation.getLoginUser() :
-          UserGroupInformation.getCurrentUser();
-      LOG.info("UserGroupInformation initialized to {}", loginUGI);
-    }
-  }
-
-  @Override
-  protected void serviceStart() throws Exception {
-    scanForUserServices();
-    launchUserService(syncUserServices);
-    // Create a thread and submit services in the background; otherwise this
-    // would block the RM's switch to active.
-    serviceLauncher = new Thread(createRunnable());
-    serviceLauncher.setName("System service launcher");
-    serviceLauncher.start();
-  }
-
-  @Override
-  protected void serviceStop() throws Exception {
-    LOG.info("Stopping {}", getName());
-    stopExecutors.set(true);
-
-    if (serviceLauncher != null) {
-      serviceLauncher.interrupt();
-      try {
-        serviceLauncher.join();
-      } catch (InterruptedException ie) {
-        LOG.warn("Interrupted Exception while stopping", ie);
-      }
-    }
-  }
-
-  private Runnable createRunnable() {
-    return new Runnable() {
-      @Override
-      public void run() {
-        launchUserService(asyncUserServices);
-      }
-    };
-  }
-
-  void launchUserService(Map<String, Set<Service>> userServices) {
-    for (Map.Entry<String, Set<Service>> entry : userServices.entrySet()) {
-      String user = entry.getKey();
-      Set<Service> services = entry.getValue();
-      if (services.isEmpty()) {
-        continue;
-      }
-      ServiceClient serviceClient = null;
-      try {
-        UserGroupInformation userUgi = getProxyUser(user);
-        serviceClient = createServiceClient(userUgi);
-        for (Service service : services) {
-          LOG.info("POST: createService = {} user = {}", service, userUgi);
-          try {
-            launchServices(userUgi, serviceClient, service);
-          } catch (IOException | UndeclaredThrowableException e) {
-            if (e.getCause() != null) {
-              LOG.warn(e.getCause().getMessage());
-            } else {
-              String message =
-                  "Failed to create service " + service.getName() + " : ";
-              LOG.error(message, e);
-            }
-          }
-        }
-      } catch (InterruptedException e) {
-        LOG.warn("System service launcher thread interrupted", e);
-        break;
-      } catch (Exception e) {
-        LOG.error("Error while submitting services for user " + user, e);
-      } finally {
-        if (serviceClient != null) {
-          try {
-            serviceClient.close();
-          } catch (IOException e) {
-            LOG.warn("Error while closing serviceClient for user {}", user);
-          }
-        }
-      }
-    }
-  }
-
-  private ServiceClient createServiceClient(UserGroupInformation userUgi)
-      throws IOException, InterruptedException {
-    ServiceClient serviceClient =
-        userUgi.doAs(new PrivilegedExceptionAction<ServiceClient>() {
-          @Override public ServiceClient run()
-              throws IOException, YarnException {
-            ServiceClient sc = getServiceClient();
-            sc.init(getConfig());
-            sc.start();
-            return sc;
-          }
-        });
-    return serviceClient;
-  }
-
-  private void launchServices(UserGroupInformation userUgi,
-      ServiceClient serviceClient, Service service)
-      throws IOException, InterruptedException {
-    if (service.getState() == ServiceState.STOPPED) {
-      userUgi.doAs(new PrivilegedExceptionAction<Void>() {
-        @Override public Void run() throws IOException, YarnException {
-          serviceClient.actionBuild(service);
-          return null;
-        }
-      });
-      LOG.info("Service {} version {} saved.", service.getName(),
-          service.getVersion());
-    } else {
-      ApplicationId applicationId =
-          userUgi.doAs(new PrivilegedExceptionAction<ApplicationId>() {
-            @Override public ApplicationId run()
-                throws IOException, YarnException {
-              ApplicationId applicationId = serviceClient.actionCreate(service);
-              return applicationId;
-            }
-          });
-      LOG.info("Service {} submitted with Application ID: {}",
-          service.getName(), applicationId);
-    }
-  }
-
-  ServiceClient getServiceClient() {
-    return new ServiceClient();
-  }
-
-  private UserGroupInformation getProxyUser(String user) {
-    UserGroupInformation ugi;
-    if (UserGroupInformation.isSecurityEnabled()) {
-      ugi = UserGroupInformation.createProxyUser(user, loginUGI);
-    } else {
-      ugi = UserGroupInformation.createRemoteUser(user);
-    }
-    return ugi;
-  }
-
-  // Scan for both launch types, i.e. sync and async.
-  void scanForUserServices() throws IOException {
-    if (systemServiceDir == null) {
-      return;
-    }
-    try {
-      LOG.info("Scan for launch type on {}", systemServiceDir);
-      RemoteIterator<FileStatus> iterLaunchType = list(systemServiceDir);
-      while (iterLaunchType.hasNext()) {
-        FileStatus launchType = iterLaunchType.next();
-        if (!launchType.isDirectory()) {
-          LOG.debug("Scanner skips for unknown file {}", launchType.getPath());
-          continue;
-        }
-        if (launchType.getPath().getName().equals(SYNC)) {
-          scanForUserServiceDefinition(launchType.getPath(), syncUserServices);
-        } else if (launchType.getPath().getName().equals(ASYNC)) {
-          scanForUserServiceDefinition(launchType.getPath(), asyncUserServices);
-        } else {
-          badDirSkipCounter++;
-          LOG.debug("Scanner skips for unknown dir {}.", launchType.getPath());
-        }
-      }
-    } catch (FileNotFoundException e) {
-      LOG.warn("System service directory {} doesn't not exist.",
-          systemServiceDir);
-    }
-  }
-
-  // Files are under systemServiceDir/<users>. Scan for 2 levels
-  // 1st level for users
-  // 2nd level for service definitions under user
-  private void scanForUserServiceDefinition(Path userDirPath,
-      Map<String, Set<Service>> userServices) throws IOException {
-    LOG.info("Scan for users on {}", userDirPath);
-    RemoteIterator<FileStatus> iterUsers = list(userDirPath);
-    while (iterUsers.hasNext()) {
-      FileStatus userDir = iterUsers.next();
-      // If the 1st level is not a user directory, skip it.
-      if (!userDir.isDirectory()) {
-        LOG.info(
-            "Service definition {} doesn't belong to any user. Ignoring.. ",
-            userDir.getPath().getName());
-        continue;
-      }
-      String userName = userDir.getPath().getName();
-      LOG.info("Scanning service definitions for user {}.", userName);
-
-      //2nd level scan
-      RemoteIterator<FileStatus> iterServices = list(userDir.getPath());
-      while (iterServices.hasNext()) {
-        FileStatus serviceCache = iterServices.next();
-        String filename = serviceCache.getPath().getName();
-        if (!serviceCache.isFile()) {
-          LOG.info("Scanner skips for unknown dir {}", filename);
-          continue;
-        }
-        if (!filename.endsWith(YARN_FILE_SUFFIX)) {
-          LOG.info("Scanner skips for unknown file extension, filename = {}",
-              filename);
-          badFileNameExtensionSkipCounter++;
-          continue;
-        }
-        Service service = getServiceDefinition(serviceCache.getPath());
-        if (service != null) {
-          Set<Service> services = userServices.get(userName);
-          if (services == null) {
-            services = new HashSet<>();
-            userServices.put(userName, services);
-          }
-          if (!services.add(service)) {
-            int count = ignoredUserServices.containsKey(userName) ?
-                ignoredUserServices.get(userName) : 0;
-            ignoredUserServices.put(userName, count + 1);
-            LOG.warn(
-                "Ignoring service {} for the user {} as it is already present,"
-                    + " filename = {}", service.getName(), userName, filename);
-          } else {
-            LOG.info("Added service {} for the user {}, filename = {}",
-                service.getName(), userName, filename);
-          }
-        }
-      }
-    }
-  }
-
-  private Service getServiceDefinition(Path filePath) {
-    Service service = null;
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading service definition from FS: " + filePath);
-      }
-      service = jsonSerDeser.load(fs, filePath);
-    } catch (IOException e) {
-      LOG.info("Error while loading service definition from FS: {}", e);
-    }
-    return service;
-  }
-
-  private RemoteIterator<FileStatus> list(Path path) throws IOException {
-    return new StoppableRemoteIterator(fs.listStatusIterator(path));
-  }
-
-  @VisibleForTesting Map<String, Integer> getIgnoredUserServices() {
-    return ignoredUserServices;
-  }
-
-  private class StoppableRemoteIterator implements RemoteIterator<FileStatus> {
-    private final RemoteIterator<FileStatus> remote;
-
-    StoppableRemoteIterator(RemoteIterator<FileStatus> remote) {
-      this.remote = remote;
-    }
-
-    @Override public boolean hasNext() throws IOException {
-      return !stopExecutors.get() && remote.hasNext();
-    }
-
-    @Override public FileStatus next() throws IOException {
-      return remote.next();
-    }
-  }
-
-  @VisibleForTesting
-  Map<String, Set<Service>> getSyncUserServices() {
-    return syncUserServices;
-  }
-
-  @VisibleForTesting
-  int getBadFileNameExtensionSkipCounter() {
-    return badFileNameExtensionSkipCounter;
-  }
-
-  @VisibleForTesting
-  int getBadDirSkipCounter() {
-    return badDirSkipCounter;
-  }
-}
\ No newline at end of file

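The directory layout in the class javadoc above drives a simple two-level scan: the first listing level is users, the second is their .yarnfile service definitions. A minimal, hypothetical sketch of that traversal using the same FileSystem calls (the path below is a made-up example; this code is an illustration rather than part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ScanSketch {
      public static void main(String[] args) throws Exception {
        Path syncDir = new Path("/services/system/sync");  // assumed location
        FileSystem fs = syncDir.getFileSystem(new Configuration());
        RemoteIterator<FileStatus> users = fs.listStatusIterator(syncDir);
        while (users.hasNext()) {                          // 1st level: users
          FileStatus user = users.next();
          if (!user.isDirectory()) {
            continue;                                      // skip stray files
          }
          RemoteIterator<FileStatus> defs =
              fs.listStatusIterator(user.getPath());
          while (defs.hasNext()) {                         // 2nd level: yarnfiles
            FileStatus f = defs.next();
            if (f.isFile() && f.getPath().getName().endsWith(".yarnfile")) {
              System.out.println(user.getPath().getName() + " -> " + f.getPath());
            }
          }
        }
      }
    }

The production code wraps each listing in StoppableRemoteIterator so a service stop can cut the scan short between hasNext() calls; the sketch omits that for brevity.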
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java
deleted file mode 100644
index cf5ce11..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.service.client contains classes
- * for YARN Services Client API.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.service.client;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;

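Both SystemServiceManagerImpl above and the ApiServer below run every privileged action inside a UserGroupInformation doAs block so the work executes as the requesting user rather than as the daemon. A minimal, hypothetical sketch of that impersonation pattern (the user name is a made-up value; with Kerberos enabled the daemon principal must also be authorized as a proxy user via the hadoop.proxyuser.* settings; this code is an illustration rather than part of the commit):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        String user = "alice";  // hypothetical end user
        UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled()
            ? UserGroupInformation.createProxyUser(user,
                UserGroupInformation.getLoginUser())       // secure: impersonate
            : UserGroupInformation.createRemoteUser(user); // simple auth
        String who = ugi.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            // Any FileSystem or YARN client calls made here execute as "alice".
            return UserGroupInformation.getCurrentUser().getShortUserName();
          }
        });
        System.out.println("Ran as: " + who);
      }
    }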
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
deleted file mode 100644
index 46c9abe..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service.webapp;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.VersionInfo;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.service.api.records.Component;
-import org.apache.hadoop.yarn.service.api.records.ComponentState;
-import org.apache.hadoop.yarn.service.api.records.Container;
-import org.apache.hadoop.yarn.service.api.records.ContainerState;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.api.records.ServiceState;
-import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
-import org.apache.hadoop.yarn.service.client.ServiceClient;
-import org.apache.hadoop.yarn.service.conf.RestApiConstants;
-import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.yarn.service.api.records.ServiceState.ACCEPTED;
-import static org.apache.hadoop.yarn.service.conf.RestApiConstants.*;
-import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.*;
-
-/**
- * The REST API endpoints for users to manage services on YARN.
- */
-@Singleton
-@Path(CONTEXT_ROOT)
-public class ApiServer {
-
-  public ApiServer() {
-    super();
-  }
-  
-  @Inject
-  public ApiServer(Configuration conf) {
-    super();
-  }
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ApiServer.class);
-  private static Configuration YARN_CONFIG = new YarnConfiguration();
-  private ServiceClient serviceClientUnitTest;
-  private boolean unitTest = false;
-
-  static {
-    init();
-  }
-
-  // initialize all the common resources - order is important
-  private static void init() {
-  }
-
-  @GET
-  @Path(VERSION)
-  @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
-  public Response getVersion() {
-    String version = VersionInfo.getBuildVersion();
-    LOG.info(version);
-    return Response.ok("{ \"hadoop_version\": \"" + version + "\"}").build();
-  }
-
-  @POST
-  @Path(SERVICE_ROOT_PATH)
-  @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
-  public Response createService(@Context HttpServletRequest request,
-      Service service) {
-    ServiceStatus serviceStatus = new ServiceStatus();
-    try {
-      UserGroupInformation ugi = getProxyUser(request);
-      LOG.info("POST: createService = {} user = {}", service, ugi);
-      if (service.getState() == ServiceState.STOPPED) {
-        ugi.doAs(new PrivilegedExceptionAction<Void>() {
-          @Override
-          public Void run() throws YarnException, IOException {
-            ServiceClient sc = getServiceClient();
-            sc.init(YARN_CONFIG);
-            sc.start();
-            sc.actionBuild(service);
-            sc.close();
-            return null;
-          }
-        });
-        serviceStatus.setDiagnostics("Service " + service.getName() +
-            " version " + service.getVersion() + " saved.");
-      } else {
-        ApplicationId applicationId = ugi
-            .doAs(new PrivilegedExceptionAction<ApplicationId>() {
-              @Override
-              public ApplicationId run() throws IOException, YarnException {
-                ServiceClient sc = getServiceClient();
-                sc.init(YARN_CONFIG);
-                sc.start();
-                ApplicationId applicationId = sc.actionCreate(service);
-                sc.close();
-                return applicationId;
-              }
-            });
-        serviceStatus.setDiagnostics("Application ID: " + applicationId);
-      }
-      serviceStatus.setState(ACCEPTED);
-      serviceStatus.setUri(
-          CONTEXT_ROOT + SERVICE_ROOT_PATH + "/" + service
-              .getName());
-      return formatResponse(Status.ACCEPTED, serviceStatus);
-    } catch (AccessControlException e) {
-      serviceStatus.setDiagnostics(e.getMessage());
-      return formatResponse(Status.FORBIDDEN, e.getCause().getMessage());
-    } catch (IllegalArgumentException e) {
-      return formatResponse(Status.BAD_REQUEST, e.getMessage());
-    } catch (IOException | InterruptedException e) {
-      String message = "Failed to create service " + service.getName()
-          + ": {}";
-      LOG.error(message, e);
-      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
-    } catch (UndeclaredThrowableException e) {
-      String message = "Failed to create service " + service.getName()
-          + ": {}";
-      LOG.error(message, e);
-      if (e.getCause().getMessage().contains("already exists")) {
-        message = "Service name " + service.getName() + " is already taken.";
-      } else {
-        message = e.getCause().getMessage();
-      }
-      return formatResponse(Status.INTERNAL_SERVER_ERROR,
-          message);
-    }
-  }
-
-  @GET
-  @Path(SERVICE_PATH)
-  @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
-  public Response getService(@Context HttpServletRequest request,
-      @PathParam(SERVICE_NAME) String appName) {
-    ServiceStatus serviceStatus = new ServiceStatus();
-    try {
-      if (appName == null) {
-        throw new IllegalArgumentException("Service name cannot be null.");
-      }
-      UserGroupInformation ugi = getProxyUser(request);
-      LOG.info("GET: getService for appName = {} user = {}", appName, ugi);
-      Service app = getServiceFromClient(ugi, appName);
-      return Response.ok(app).build();
-    } catch (AccessControlException e) {
-      return formatResponse(Status.FORBIDDEN, e.getMessage());
-    } catch (IllegalArgumentException e) {
-      serviceStatus.setDiagnostics(e.getMessage());
-      serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
-      return Response.status(Status.NOT_FOUND).entity(serviceStatus)
-          .build();
-    } catch (FileNotFoundException e) {
-      serviceStatus.setDiagnostics("Service " + appName + " not found");
-      serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
-      return Response.status(Status.NOT_FOUND).entity(serviceStatus)
-          .build();
-    } catch (IOException | InterruptedException e) {
-      LOG.error("Get service failed: {}", e);
-      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
-    } catch (UndeclaredThrowableException e) {
-      LOG.error("Get service failed: {}", e);
-      return formatResponse(Status.INTERNAL_SERVER_ERROR,
-          e.getCause().getMessage());
-    }
-  }
-
-  @DELETE
-  @Path(SERVICE_PATH)
-  @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
-  public Response deleteService(@Context HttpServletRequest request,
-      @PathParam(SERVICE_NAME) String appName) {
-    try {
-      if (appName == null) {
-        throw new IllegalArgumentException("Service name can not be null.");
-      }
-      UserGroupInformation ugi = getProxyUser(request);
-      LOG.info("DELETE: deleteService for appName = {} user = {}",
-          appName, ugi);
-      return stopService(appName, true, ugi);
-    } catch (AccessControlException e) {
-      return formatResponse(Status.FORBIDDEN, e.getMessage());
-    } catch (IllegalArgumentException e) {
-      return formatResponse(Status.BAD_REQUEST, e.getMessage());
-    } catch (UndeclaredThrowableException e) {
-      LOG.error("Fail to stop service: {}", e);
-      return formatResponse(Status.BAD_REQUEST,
-          e.getCause().getMessage());
-    } catch (YarnException | FileNotFoundException e) {
-      return formatResponse(Status.NOT_FOUND, e.getMessage());
-    } catch (Exception e) {
-      LOG.error("Fail to stop service: {}", e);
-      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
-    }
-  }
-
-  private Response stopService(String appName, boolean destroy,
-      final UserGroupInformation ugi) throws Exception {
-    int result = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
-      @Override
-      public Integer run() throws Exception {
-        int result = 0;
-        ServiceClient sc = getServiceClient();
-        sc.init(YARN_CONFIG);
-        sc.start();
-        Exception stopException = null;
-        try {
-          result = sc.actionStop(appName, destroy);
-          if (result == EXIT_SUCCESS) {
-            LOG.info("Successfully stopped service {}", appName);
-          }
-        } catch (Exception e) {
-          LOG.info("Got exception stopping service", e);
-          stopException = e;
-        }
-        if (destroy) {
-          result = sc.actionDestroy(appName);
-          if (result == EXIT_SUCCESS) {
-            LOG.info("Successfully deleted service {}", appName);
-          }
-        } else {
-          if (stopException != null) {
-            throw stopException;
-          }
-        }
-        sc.close();
-        return result;
-      }
-    });
-    ServiceStatus serviceStatus = new ServiceStatus();
-    if (destroy) {
-      if (result == EXIT_SUCCESS) {
-        serviceStatus.setDiagnostics("Successfully destroyed service " +
-            appName);
-      } else {
-        if (result == EXIT_NOT_FOUND) {
-          serviceStatus
-              .setDiagnostics("Service " + appName + " doesn't exist");
-          return formatResponse(Status.BAD_REQUEST, serviceStatus);
-        } else {
-          serviceStatus
-              .setDiagnostics("Service " + appName + " error cleaning up " +
-                  "registry");
-          return formatResponse(Status.INTERNAL_SERVER_ERROR, serviceStatus);
-        }
-      }
-    } else {
-      if (result == EXIT_COMMAND_ARGUMENT_ERROR) {
-        serviceStatus
-            .setDiagnostics("Service " + appName + " is already stopped");
-        return formatResponse(Status.BAD_REQUEST, serviceStatus);
-      } else {
-        serviceStatus.setDiagnostics("Successfully stopped service " + appName);
-      }
-    }
-    return formatResponse(Status.OK, serviceStatus);
-  }
-
-  @PUT
-  @Path(COMPONENTS_PATH)
-  @Consumes({MediaType.APPLICATION_JSON})
-  @Produces({RestApiConstants.MEDIA_TYPE_JSON_UTF8, MediaType.TEXT_PLAIN})
-  public Response updateComponents(@Context HttpServletRequest request,
-      @PathParam(SERVICE_NAME) String serviceName,
-      List<Component> requestComponents) {
-
-    try {
-      if (requestComponents == null || requestComponents.isEmpty()) {
-        throw new YarnException("No components provided.");
-      }
-      UserGroupInformation ugi = getProxyUser(request);
-      Set<String> compNamesToUpgrade = new HashSet<>();
-      requestComponents.forEach(reqComp -> {
-        if (reqComp.getState() != null &&
-            reqComp.getState().equals(ComponentState.UPGRADING)) {
-          compNamesToUpgrade.add(reqComp.getName());
-        }
-      });
-      LOG.info("PUT: upgrade components {} for service {} " +
-          "user = {}", compNamesToUpgrade, serviceName, ugi);
-      return processComponentsUpgrade(ugi, serviceName, compNamesToUpgrade);
-    } catch (AccessControlException e) {
-      return formatResponse(Response.Status.FORBIDDEN, e.getMessage());
-    } catch (YarnException e) {
-      return formatResponse(Response.Status.BAD_REQUEST, e.getMessage());
-    } catch (IOException | InterruptedException e) {
-      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
-          e.getMessage());
-    } catch (UndeclaredThrowableException e) {
-      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
-          e.getCause().getMessage());
-    }
-  }
-
-  @PUT
-  @Path(COMPONENT_PATH)
-  @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8",
-              MediaType.TEXT_PLAIN  })
-  public Response updateComponent(@Context HttpServletRequest request,
-      @PathParam(SERVICE_NAME) String appName,
-      @PathParam(COMPONENT_NAME) String componentName, Component component) {
-
-    try {
-      if (component == null) {
-        throw new YarnException("No component data provided");
-      }
-      if (component.getName() != null
-          && !component.getName().equals(componentName)) {
-        String msg = "Component name in the request object ("
-            + component.getName() + ") does not match that in the URI path ("
-            + componentName + ")";
-        throw new YarnException(msg);
-      }
-      UserGroupInformation ugi = getProxyUser(request);
-      if (component.getState() != null &&
-          component.getState().equals(ComponentState.UPGRADING)) {
-        LOG.info("PUT: upgrade component {} for service {} " +
-            "user = {}", component.getName(), appName, ugi);
-        return processComponentsUpgrade(ugi, appName,
-            Sets.newHashSet(componentName));
-      }
-
-      if (component.getNumberOfContainers() == null) {
-        throw new YarnException("No container count provided");
-      }
-      if (component.getNumberOfContainers() < 0) {
-        String message = "Invalid number of containers specified "
-            + component.getNumberOfContainers();
-        throw new YarnException(message);
-      }
-      Map<String, Long> original = ugi
-          .doAs(new PrivilegedExceptionAction<Map<String, Long>>() {
-            @Override
-            public Map<String, Long> run() throws YarnException, IOException {
-              ServiceClient sc = new ServiceClient();
-              sc.init(YARN_CONFIG);
-              sc.start();
-              Map<String, Long> original = sc.flexByRestService(appName,
-                  Collections.singletonMap(componentName,
-                      component.getNumberOfContainers()));
-              sc.close();
-              return original;
-            }
-          });
-      ServiceStatus status = new ServiceStatus();
-      status.setDiagnostics(
-          "Updating component (" + componentName + ") size from " + original
-              .get(componentName) + " to " + component.getNumberOfContainers());
-      return formatResponse(Status.OK, status);
-    } catch (AccessControlException e) {
-      return formatResponse(Status.FORBIDDEN, e.getMessage());
-    } catch (YarnException e) {
-      return formatResponse(Status.BAD_REQUEST, e.getMessage());
-    } catch (IOException | InterruptedException e) {
-      return formatResponse(Status.INTERNAL_SERVER_ERROR,
-          e.getMessage());
-    } catch (UndeclaredThrowableException e) {
-      return formatResponse(Status.INTERNAL_SERVER_ERROR,
-          e.getCause().getMessage());
-    }
-  }
-
-  @PUT
-  @Path(SERVICE_PATH)
-  @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
-  public Response updateService(@Context HttpServletRequest request,
-      @PathParam(SERVICE_NAME) String appName,
-      Service updateServiceData) {
-    try {
-      UserGroupInformation ugi = getProxyUser(request);
-      LOG.info("PUT: updateService for app = {} with data = {} user = {}",
-          appName, updateServiceData, ugi);
-      // Ignore the app name provided in updateServiceData and always use
-      // appName path param
-      updateServiceData.setName(appName);
-
-      if (updateServiceData.getState() != null
-          && updateServiceData.getState() == ServiceState.FLEX) {
-        return flexService(updateServiceData, ugi);
-      }
-      // For STOP the app should be running. If already stopped then this
-      // operation will be a no-op. For START it should be in stopped state.
-      // If already running then this operation will be a no-op.
-      if (updateServiceData.getState() != null
-          && updateServiceData.getState() == ServiceState.STOPPED) {
-        return stopService(appName, false, ugi);
-      }
-
-      // If a START is requested
-      if (updateServiceData.getState() != null
-          && updateServiceData.getState() == ServiceState.STARTED) {
-        return startService(appName, ugi);
-      }
-
-      // If an UPGRADE is requested
-      if (updateServiceData.getState() != null && (
-          updateServiceData.getState() == ServiceState.UPGRADING ||
-              updateServiceData.getState() ==
-                  ServiceState.UPGRADING_AUTO_FINALIZE)) {
-        return upgradeService(updateServiceData, ugi);
-      }
-
-      // If new lifetime value specified then update it
-      if (updateServiceData.getLifetime() != null
-          && updateServiceData.getLifetime() > 0) {
-        return updateLifetime(appName, updateServiceData, ugi);
-      }
-    } catch (UndeclaredThrowableException e) {
-      return formatResponse(Status.BAD_REQUEST,
-          e.getCause().getMessage());
-    } catch (AccessControlException e) {
-      return formatResponse(Status.FORBIDDEN, e.getMessage());
-    } catch (FileNotFoundException e) {
-      String message = "Application is not found app: " + appName;
-      LOG.error(message, e);
-      return formatResponse(Status.NOT_FOUND, e.getMessage());
-    } catch (YarnException e) {
-      String message = "Service is not found in hdfs: " + appName;
-      LOG.error(message, e);
-      return formatResponse(Status.NOT_FOUND, e.getMessage());
-    } catch (Exception e) {
-      String message = "Error while performing operation for app: " + appName;
-      LOG.error(message, e);
-      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
-    }
-
-    // If nothing happens consider it a no-op
-    return Response.status(Status.NO_CONTENT).build();
-  }
-
-  @PUT
-  @Path(COMP_INSTANCE_LONG_PATH)
-  @Consumes({MediaType.APPLICATION_JSON})
-  @Produces({RestApiConstants.MEDIA_TYPE_JSON_UTF8, MediaType.TEXT_PLAIN})
-  public Response updateComponentInstance(@Context HttpServletRequest request,
-      @PathParam(SERVICE_NAME) String serviceName,
-      @PathParam(COMPONENT_NAME) String componentName,
-      @PathParam(COMP_INSTANCE_NAME) String compInstanceName,
-      Container reqContainer) {
-
-    try {
-      UserGroupInformation ugi = getProxyUser(request);
-      LOG.info("PUT: update component instance {} for component = {}" +
-              " service = {} user = {}", compInstanceName, componentName,
-          serviceName, ugi);
-      if (reqContainer == null) {
-        throw new YarnException("No container data provided.");
-      }
-      Service service = getServiceFromClient(ugi, serviceName);
-      Component component = service.getComponent(componentName);
-      if (component == null) {
-        throw new YarnException(String.format(
-            "The component name in the URI path (%s) is invalid.",
-            componentName));
-      }
-
-      Container liveContainer = component.getComponentInstance(
-          compInstanceName);
-      if (liveContainer == null) {
-        throw new YarnException(String.format(
-            "The component (%s) does not have a component instance (%s).",
-            componentName, compInstanceName));
-      }
-
-      if (reqContainer.getState() != null
-          && reqContainer.getState().equals(ContainerState.UPGRADING)) {
-        return processContainersUpgrade(ugi, service,
-            Lists.newArrayList(liveContainer));
-      }
-    } catch (AccessControlException e) {
-      return formatResponse(Response.Status.FORBIDDEN, e.getMessage());
-    } catch (YarnException e) {
-      return formatResponse(Response.Status.BAD_REQUEST, e.getMessage());
-    } catch (IOException | InterruptedException e) {
-      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
-          e.getMessage());
-    } catch (UndeclaredThrowableException e) {
-      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
-          e.getCause().getMessage());
-    }
-    return Response.status(Status.NO_CONTENT).build();
-  }
-
-  @PUT
-  @Path(COMP_INSTANCES_PATH)
-  @Consumes({MediaType.APPLICATION_JSON})
-  @Produces({RestApiConstants.MEDIA_TYPE_JSON_UTF8, MediaType.TEXT_PLAIN})
-  public Response updateComponentInstances(@Context HttpServletRequest request,
-      @PathParam(SERVICE_NAME) String serviceName,
-      List<Container> requestContainers) {
-
-    try {
-      if (requestContainers == null || requestContainers.isEmpty()) {
-        throw new YarnException("No containers provided.");
-      }
-      UserGroupInformation ugi = getProxyUser(request);
-      List<String> toUpgrade = new ArrayList<>();
-      for (Container reqContainer : requestContainers) {
-        if (reqContainer.getState() != null &&
-            reqContainer.getState().equals(ContainerState.UPGRADING)) {
-          toUpgrade.add(reqContainer.getComponentInstanceName());
-        }
-      }
-
-      if (!toUpgrade.isEmpty()) {
-        Service service = getServiceFromClient(ugi, serviceName);
-        LOG.info("PUT: upgrade component instances {} for service = {} " +
-            "user = {}", toUpgrade, serviceName, ugi);
-        List<Container> liveContainers = ServiceApiUtil
-            .getLiveContainers(service, toUpgrade);
-
-        return processContainersUpgrade(ugi, service, liveContainers);
-      }
-    } catch (AccessControlException e) {
-      return formatResponse(Response.Status.FORBIDDEN, e.getMessage());
-    } catch (YarnException e) {
-      return formatResponse(Response.Status.BAD_REQUEST, e.getMessage());
-    } catch (IOException | InterruptedException e) {
-      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
-          e.getMessage());
-    } catch (UndeclaredThrowableException e) {
-      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
-          e.getCause().getMessage());
-    }
-    return Response.status(Status.NO_CONTENT).build();
-  }
-
-  private Response flexService(Service service, UserGroupInformation ugi)
-      throws IOException, InterruptedException {
-    String appName = service.getName();
-    Response response = Response.status(Status.BAD_REQUEST).build();
-    Map<String, String> componentCountStrings = new HashMap<String, String>();
-    for (Component c : service.getComponents()) {
-      componentCountStrings.put(c.getName(),
-          c.getNumberOfContainers().toString());
-    }
-    Integer result = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
-
-      @Override
-      public Integer run() throws YarnException, IOException {
-        int result = 0;
-        ServiceClient sc = new ServiceClient();
-        sc.init(YARN_CONFIG);
-        sc.start();
-        result = sc
-            .actionFlex(appName, componentCountStrings);
-        sc.close();
-        return Integer.valueOf(result);
-      }
-    });
-    if (result == EXIT_SUCCESS) {
-      String message = "Service " + appName + " is successfully flexed.";
-      LOG.info(message);
-      ServiceStatus status = new ServiceStatus();
-      status.setDiagnostics(message);
-      status.setState(ServiceState.ACCEPTED);
-      response = formatResponse(Status.ACCEPTED, status);
-    }
-    return response;
-  }
-
-  private Response updateLifetime(String appName, Service updateAppData,
-      final UserGroupInformation ugi) throws IOException,
-      InterruptedException {
-    String newLifeTime = ugi.doAs(new PrivilegedExceptionAction<String>() {
-      @Override
-      public String run() throws YarnException, IOException {
-        ServiceClient sc = getServiceClient();
-        sc.init(YARN_CONFIG);
-        sc.start();
-        String newLifeTime = sc.updateLifetime(appName,
-            updateAppData.getLifetime());
-        sc.close();
-        return newLifeTime;
-      }
-    });
-    ServiceStatus status = new ServiceStatus();
-    status.setDiagnostics(
-        "Service (" + appName + ")'s lifeTime is updated to " + newLifeTime
-            + ", " + updateAppData.getLifetime() + " seconds remaining");
-    return formatResponse(Status.OK, status);
-  }
-
-  private Response startService(String appName,
-      final UserGroupInformation ugi) throws IOException,
-      InterruptedException {
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws YarnException, IOException {
-        ServiceClient sc = getServiceClient();
-        sc.init(YARN_CONFIG);
-        sc.start();
-        sc.actionStart(appName);
-        sc.close();
-        return null;
-      }
-    });
-    LOG.info("Successfully started service " + appName);
-    ServiceStatus status = new ServiceStatus();
-    status.setDiagnostics("Service " + appName + " is successfully started.");
-    status.setState(ServiceState.ACCEPTED);
-    return formatResponse(Status.OK, status);
-  }
-
-  private Response upgradeService(Service service,
-      final UserGroupInformation ugi) throws IOException, InterruptedException {
-    ServiceStatus status = new ServiceStatus();
-    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
-      ServiceClient sc = getServiceClient();
-      sc.init(YARN_CONFIG);
-      sc.start();
-      sc.initiateUpgrade(service);
-      sc.close();
-      return null;
-    });
-    LOG.info("Service {} version {} upgrade initialized", service.getName(),
-        service.getVersion());
-    status.setDiagnostics("Service " + service.getName() +
-        " version " + service.getVersion() + " saved.");
-    status.setState(ServiceState.ACCEPTED);
-    return formatResponse(Status.ACCEPTED, status);
-  }
-
-  private Response processComponentsUpgrade(UserGroupInformation ugi,
-      String serviceName, Set<String> compNames) throws YarnException,
-      IOException, InterruptedException {
-    Service service = getServiceFromClient(ugi, serviceName);
-    if (service.getState() != ServiceState.UPGRADING) {
-      throw new YarnException(
-          String.format("The upgrade of service %s has not been initiated.",
-              service.getName()));
-    }
-    List<Container> containersToUpgrade = ServiceApiUtil
-        .validateAndResolveCompsUpgrade(service, compNames);
-    Integer result = invokeContainersUpgrade(ugi, service, containersToUpgrade);
-    if (result == EXIT_SUCCESS) {
-      ServiceStatus status = new ServiceStatus();
-      status.setDiagnostics(
-          "Upgrading components " + Joiner.on(',').join(compNames) + ".");
-      return formatResponse(Response.Status.ACCEPTED, status);
-    }
-    // If result is not a success, consider it a no-op
-    return Response.status(Response.Status.NO_CONTENT).build();
-  }
-
-  private Response processContainersUpgrade(UserGroupInformation ugi,
-      Service service, List<Container> containers) throws YarnException,
-      IOException, InterruptedException {
-
-    if (service.getState() != ServiceState.UPGRADING) {
-      throw new YarnException(
-          String.format("The upgrade of service %s has not been initiated.",
-              service.getName()));
-    }
-    ServiceApiUtil.validateInstancesUpgrade(containers);
-    Integer result = invokeContainersUpgrade(ugi, service, containers);
-    if (result == EXIT_SUCCESS) {
-      ServiceStatus status = new ServiceStatus();
-      status.setDiagnostics(
-          "Upgrading component instances " + containers.stream()
-              .map(Container::getId).collect(Collectors.joining(",")) + ".");
-      return formatResponse(Response.Status.ACCEPTED, status);
-    }
-    // If result is not a success, consider it a no-op
-    return Response.status(Response.Status.NO_CONTENT).build();
-  }
-
-  private int invokeContainersUpgrade(UserGroupInformation ugi,
-      Service service, List<Container> containers) throws IOException,
-      InterruptedException {
-    return ugi.doAs((PrivilegedExceptionAction<Integer>) () -> {
-      int result1;
-      ServiceClient sc = getServiceClient();
-      sc.init(YARN_CONFIG);
-      sc.start();
-      result1 = sc.actionUpgrade(service, containers);
-      sc.close();
-      return result1;
-    });
-  }
-
-  private Service getServiceFromClient(UserGroupInformation ugi,
-      String serviceName) throws IOException, InterruptedException {
-
-    return ugi.doAs((PrivilegedExceptionAction<Service>) () -> {
-      ServiceClient sc = getServiceClient();
-      sc.init(YARN_CONFIG);
-      sc.start();
-      Service app1 = sc.getStatus(serviceName);
-      sc.close();
-      return app1;
-    });
-  }
-
-  /**
-   * Used by negative test case.
-   *
-   * @param mockServerClient - A mocked version of ServiceClient
-   */
-  public void setServiceClient(ServiceClient mockServerClient) {
-    serviceClientUnitTest = mockServerClient;
-    unitTest = true;
-  }
-
-  private ServiceClient getServiceClient() {
-    if (unitTest) {
-      return serviceClientUnitTest;
-    } else {
-      return new ServiceClient();
-    }
-  }
-
-  /**
-   * Configure impersonation callback.
-   *
-   * @param request - web request
-   * @return - configured UGI class for proxy callback
-   * @throws IOException - if user is not login.
-   */
-  private UserGroupInformation getProxyUser(HttpServletRequest request)
-      throws AccessControlException {
-    UserGroupInformation proxyUser;
-    UserGroupInformation ugi;
-    String remoteUser = request.getRemoteUser();
-    try {
-      if (UserGroupInformation.isSecurityEnabled()) {
-        proxyUser = UserGroupInformation.getLoginUser();
-        ugi = UserGroupInformation.createProxyUser(remoteUser, proxyUser);
-      } else {
-        ugi = UserGroupInformation.createRemoteUser(remoteUser);
-      }
-      return ugi;
-    } catch (IOException e) {
-      throw new AccessControlException(e.getCause());
-    }
-  }
-
-  /**
-   * Format HTTP response.
-   *
-   * @param status - HTTP Code
-   * @param message - Diagnostic message
-   * @return - HTTP response
-   */
-  private Response formatResponse(Status status, String message) {
-    ServiceStatus entity = new ServiceStatus();
-    entity.setDiagnostics(message);
-    return formatResponse(status, entity);
-  }
-
-  /**
-   * Format HTTP response.
-   *
-   * @param status - HTTP Code
-   * @param entity - ServiceStatus object
-   * @return - HTTP response
-   */
-  private Response formatResponse(Status status, ServiceStatus entity) {
-    return Response.status(status).entity(entity).build();
-  }
-}
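
All of the removed handlers above share one call shape: resolve the caller into a UserGroupInformation (a proxy user when security is enabled, a plain remote user otherwise), run the actual work inside ugi.doAs, and within the action drive a short-lived ServiceClient through init/start/use/close. A minimal sketch of that shape, not part of the commit: the class and method names are illustrative, and the shared YARN_CONFIG is replaced by an injected Configuration.

    import java.security.PrivilegedExceptionAction;

    import javax.servlet.http.HttpServletRequest;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.service.client.ServiceClient;

    public final class ServiceClientCallSketch {

      /** Runs one ServiceClient call as the remote caller. */
      static String statusDiagnostics(HttpServletRequest request,
          String serviceName, Configuration yarnConfig) throws Exception {
        // Mirror getProxyUser(): impersonate under security, else use the
        // plain remote user.
        String remoteUser = request.getRemoteUser();
        UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled()
            ? UserGroupInformation.createProxyUser(remoteUser,
                UserGroupInformation.getLoginUser())
            : UserGroupInformation.createRemoteUser(remoteUser);

        // Each REST call drives a fresh, short-lived ServiceClient.
        return ugi.doAs((PrivilegedExceptionAction<String>) () -> {
          ServiceClient sc = new ServiceClient();
          sc.init(yarnConfig);
          sc.start();
          try {
            return sc.getStatus(serviceName).toString();
          } finally {
            sc.close();
          }
        });
      }
    }

The response side is equally uniform: a successful hand-off yields 200/202 with a ServiceStatus entity, and any path that changes nothing falls through to 204 No Content.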




[18/50] [abbrv] hadoop git commit: YARN-7900. [AMRMProxy] AMRMClientRelayer for stateful FederationInterceptor. (Botong Huang via asuresh)

Posted by ar...@apache.org.
YARN-7900. [AMRMProxy] AMRMClientRelayer for stateful FederationInterceptor. (Botong Huang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3159bffc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3159bffc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3159bffc

Branch: refs/heads/HDDS-48
Commit: 3159bffce23abf35754da2d7d51de7d8c2631ae3
Parents: f749517
Author: Arun Suresh <as...@apache.org>
Authored: Thu May 17 20:00:52 2018 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Thu May 17 20:00:52 2018 -0700

----------------------------------------------------------------------
 .../yarn/client/api/impl/AMRMClientImpl.java    | 151 ++------
 .../hadoop/yarn/client/AMRMClientUtils.java     | 262 +++++++++++++
 .../hadoop/yarn/server/AMRMClientRelayer.java   | 364 +++++++++++++++++++
 .../failover/FederationProxyProviderUtil.java   |   2 +-
 .../apache/hadoop/yarn/server/package-info.java |  18 +
 .../server/scheduler/ResourceRequestSet.java    | 206 +++++++++++
 .../server/scheduler/ResourceRequestSetKey.java | 133 +++++++
 .../server/scheduler/SchedulerRequestKey.java   |   4 +-
 .../yarn/server/uam/UnmanagedAMPoolManager.java |   2 +-
 .../server/uam/UnmanagedApplicationManager.java |   2 +-
 .../yarn/server/utils/AMRMClientUtils.java      | 191 ----------
 .../yarn/server/MockResourceManagerFacade.java  |   2 +-
 .../yarn/server/TestAMRMClientRelayer.java      | 275 ++++++++++++++
 .../amrmproxy/FederationInterceptor.java        |   2 +-
 .../ApplicationMasterService.java               |   2 +-
 .../TestApplicationMasterLauncher.java          |   2 +-
 16 files changed, 1299 insertions(+), 319 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index ef849b2..36c3cf1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -31,11 +31,9 @@ import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Queue;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.AbstractMap.SimpleEntry;
-import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -68,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
@@ -113,13 +112,9 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   protected final Set<String> blacklistRemovals = new HashSet<String>();
   private Map<Set<String>, PlacementConstraint> placementConstraints =
       new HashMap<>();
-  private Queue<Collection<SchedulingRequest>> batchedSchedulingRequests =
-      new LinkedList<>();
-  private Map<Set<String>, List<SchedulingRequest>> outstandingSchedRequests =
-      new ConcurrentHashMap<>();
 
   protected Map<String, Resource> resourceProfilesMap;
-  
+
   static class ResourceRequestInfo<T> {
     ResourceRequest remoteRequest;
     LinkedHashSet<T> containerRequests;
@@ -168,6 +163,10 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
       SimpleEntry<Container, UpdateContainerRequest>> pendingChange =
       new HashMap<>();
 
+  private List<SchedulingRequest> schedulingRequests = new ArrayList<>();
+  private Map<Set<String>, List<SchedulingRequest>> outstandingSchedRequests =
+      new HashMap<>();
+
   public AMRMClientImpl() {
     super(AMRMClientImpl.class.getName());
   }
@@ -252,18 +251,18 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
       this.resourceProfilesMap = response.getResourceProfiles();
       List<Container> prevContainers =
           response.getContainersFromPreviousAttempts();
-      removeFromOutstandingSchedulingRequests(prevContainers);
-      recreateSchedulingRequestBatch();
+      AMRMClientUtils.removeFromOutstandingSchedulingRequests(prevContainers,
+          this.outstandingSchedRequests);
     }
     return response;
   }
 
   @Override
-  public void addSchedulingRequests(
-      Collection<SchedulingRequest> schedulingRequests) {
-    synchronized (this.batchedSchedulingRequests) {
-      this.batchedSchedulingRequests.add(schedulingRequests);
-    }
+  public synchronized void addSchedulingRequests(
+      Collection<SchedulingRequest> newSchedulingRequests) {
+    this.schedulingRequests.addAll(newSchedulingRequests);
+    AMRMClientUtils.addToOutstandingSchedulingRequests(newSchedulingRequests,
+        this.outstandingSchedRequests);
   }
 
   @Override
@@ -279,6 +278,8 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     List<String> blacklistToRemove = new ArrayList<String>();
     Map<ContainerId, SimpleEntry<Container, UpdateContainerRequest>> oldChange =
         new HashMap<>();
+    List<SchedulingRequest> schedulingRequestList = new LinkedList<>();
+
     try {
       synchronized (this) {
         askList = cloneAsks();
@@ -286,10 +287,13 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         oldChange.putAll(change);
         List<UpdateContainerRequest> updateList = createUpdateList();
         releaseList = new ArrayList<ContainerId>(release);
+        schedulingRequestList = new ArrayList<>(schedulingRequests);
+
         // optimistically clear this collection assuming no RPC failure
         ask.clear();
         release.clear();
         change.clear();
+        schedulingRequests.clear();
 
         blacklistToAdd.addAll(blacklistAdditions);
         blacklistToRemove.addAll(blacklistRemovals);
@@ -301,8 +305,9 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         allocateRequest = AllocateRequest.newBuilder()
             .responseId(lastResponseId).progress(progressIndicator)
             .askList(askList).resourceBlacklistRequest(blacklistRequest)
-            .releaseList(releaseList).updateRequests(updateList).build();
-        populateSchedulingRequests(allocateRequest);
+            .releaseList(releaseList).updateRequests(updateList)
+            .schedulingRequests(schedulingRequestList).build();
+
         // clear blacklistAdditions and blacklistRemovals before
         // unsynchronized part
         blacklistAdditions.clear();
@@ -311,10 +316,6 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
 
       try {
         allocateResponse = rmClient.allocate(allocateRequest);
-        removeFromOutstandingSchedulingRequests(
-            allocateResponse.getAllocatedContainers());
-        removeFromOutstandingSchedulingRequests(
-            allocateResponse.getContainersFromPreviousAttempts());
       } catch (ApplicationMasterNotRegisteredException e) {
         LOG.warn("ApplicationMaster is out of sync with ResourceManager,"
             + " hence resyncing.");
@@ -331,6 +332,10 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
             }
           }
           change.putAll(this.pendingChange);
+          for (List<SchedulingRequest> schedReqs :
+              this.outstandingSchedRequests.values()) {
+            this.schedulingRequests.addAll(schedReqs);
+          }
         }
         // re register with RM
         registerApplicationMaster();
@@ -370,6 +375,12 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
             removePendingChangeRequests(changed);
           }
         }
+        AMRMClientUtils.removeFromOutstandingSchedulingRequests(
+            allocateResponse.getAllocatedContainers(),
+            this.outstandingSchedRequests);
+        AMRMClientUtils.removeFromOutstandingSchedulingRequests(
+            allocateResponse.getContainersFromPreviousAttempts(),
+            this.outstandingSchedRequests);
       }
     } finally {
       // TODO how to differentiate remote yarn exception vs error in rpc
@@ -410,108 +421,12 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
           }
           blacklistAdditions.addAll(blacklistToAdd);
           blacklistRemovals.addAll(blacklistToRemove);
-        }
-      }
-    }
-    return allocateResponse;
-  }
 
-  private void populateSchedulingRequests(AllocateRequest allocateRequest) {
-    synchronized (this.batchedSchedulingRequests) {
-      if (!this.batchedSchedulingRequests.isEmpty()) {
-        List<SchedulingRequest> newReqs = new LinkedList<>();
-        Iterator<Collection<SchedulingRequest>> iter =
-            this.batchedSchedulingRequests.iterator();
-        while (iter.hasNext()) {
-          Collection<SchedulingRequest> requests = iter.next();
-          newReqs.addAll(requests);
-          addToOutstandingSchedulingRequests(requests);
-          iter.remove();
-        }
-        allocateRequest.setSchedulingRequests(newReqs);
-      }
-    }
-  }
-
-  private void recreateSchedulingRequestBatch() {
-    List<SchedulingRequest> batched = new ArrayList<>();
-    synchronized (this.outstandingSchedRequests) {
-      for (List<SchedulingRequest> schedReqs :
-          this.outstandingSchedRequests.values()) {
-        batched.addAll(schedReqs);
-      }
-    }
-    synchronized (this.batchedSchedulingRequests) {
-      this.batchedSchedulingRequests.add(batched);
-    }
-  }
-
-  private void addToOutstandingSchedulingRequests(
-      Collection<SchedulingRequest> requests) {
-    for (SchedulingRequest req : requests) {
-      List<SchedulingRequest> schedulingRequests =
-          this.outstandingSchedRequests.computeIfAbsent(
-              req.getAllocationTags(), x -> new LinkedList<>());
-      SchedulingRequest matchingReq = null;
-      synchronized (schedulingRequests) {
-        for (SchedulingRequest schedReq : schedulingRequests) {
-          if (isMatching(req, schedReq)) {
-            matchingReq = schedReq;
-            break;
-          }
-        }
-        if (matchingReq != null) {
-          matchingReq.getResourceSizing().setNumAllocations(
-              req.getResourceSizing().getNumAllocations());
-        } else {
-          schedulingRequests.add(req);
-        }
-      }
-    }
-  }
-
-  private boolean isMatching(SchedulingRequest schedReq1,
-      SchedulingRequest schedReq2) {
-    return schedReq1.getPriority().equals(schedReq2.getPriority()) &&
-        schedReq1.getExecutionType().getExecutionType().equals(
-            schedReq1.getExecutionType().getExecutionType()) &&
-        schedReq1.getAllocationRequestId() ==
-            schedReq2.getAllocationRequestId();
-  }
-
-  private void removeFromOutstandingSchedulingRequests(
-      Collection<Container> containers) {
-    if (containers == null || containers.isEmpty()) {
-      return;
-    }
-    for (Container container : containers) {
-      if (container.getAllocationTags() != null &&
-          !container.getAllocationTags().isEmpty()) {
-        List<SchedulingRequest> schedReqs =
-            this.outstandingSchedRequests.get(container.getAllocationTags());
-        if (schedReqs != null && !schedReqs.isEmpty()) {
-          synchronized (schedReqs) {
-            Iterator<SchedulingRequest> iter = schedReqs.iterator();
-            while (iter.hasNext()) {
-              SchedulingRequest schedReq = iter.next();
-              if (schedReq.getPriority().equals(container.getPriority()) &&
-                  schedReq.getAllocationRequestId() ==
-                      container.getAllocationRequestId()) {
-                int numAllocations =
-                    schedReq.getResourceSizing().getNumAllocations();
-                numAllocations--;
-                if (numAllocations == 0) {
-                  iter.remove();
-                } else {
-                  schedReq.getResourceSizing()
-                      .setNumAllocations(numAllocations);
-                }
-              }
-            }
-          }
+          schedulingRequests.addAll(schedulingRequestList);
         }
       }
     }
+    return allocateResponse;
   }
 
   private List<UpdateContainerRequest> createUpdateList() {
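
The AMRMClientImpl changes above fold SchedulingRequests into the same resync path already used for asks, releases, and change requests: a snapshot is cleared optimistically before the RPC, and on ApplicationMasterNotRegisteredException everything still outstanding is copied back into the to-send collections before re-registering and retrying. Reduced to its shape as an editor's sketch, with restorePendingState and reRegister as hypothetical placeholders (the real code also resets the responseId before retrying):

    import java.io.IOException;

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    abstract class ResyncSketch {

      abstract AllocateResponse rmAllocate(AllocateRequest req)
          throws YarnException, IOException;

      // Copy remotely-pending asks/releases/changes/scheduling requests
      // back into the to-send collections.
      abstract void restorePendingState();

      abstract void reRegister() throws YarnException, IOException;

      AllocateResponse allocateWithResync(AllocateRequest req)
          throws YarnException, IOException {
        try {
          return rmAllocate(req);
        } catch (ApplicationMasterNotRegisteredException e) {
          // The RM failed over and lost its view of this AM: restore all
          // pending state, re-register, then retry the heartbeat.
          restorePendingState();
          reRegister();
          return allocateWithResync(req);
        }
      }
    }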

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
new file mode 100644
index 0000000..387e399
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/AMRMClientUtils.java
@@ -0,0 +1,262 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
+import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for AMRMClient.
+ */
+@Private
+public final class AMRMClientUtils {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AMRMClientUtils.class);
+
+  public static final String APP_ALREADY_REGISTERED_MESSAGE =
+      "Application Master is already registered : ";
+
+  private AMRMClientUtils() {
+  }
+
+  /**
+   * Handle ApplicationNotRegistered exception and re-register.
+   *
+   * @param appId application Id
+   * @param rmProxy RM proxy instance
+   * @param registerRequest the AM re-register request
+   * @throws YarnException if re-register fails
+   */
+  public static void handleNotRegisteredExceptionAndReRegister(
+      ApplicationId appId, ApplicationMasterProtocol rmProxy,
+      RegisterApplicationMasterRequest registerRequest) throws YarnException {
+    LOG.info("App attempt {} not registered, most likely due to RM failover. "
+        + "Trying to re-register.", appId);
+    try {
+      rmProxy.registerApplicationMaster(registerRequest);
+    } catch (Exception e) {
+      if (e instanceof InvalidApplicationMasterRequestException
+          && e.getMessage().contains(APP_ALREADY_REGISTERED_MESSAGE)) {
+        LOG.info("Concurrent thread successfully registered, moving on.");
+      } else {
+        LOG.error("Error trying to re-register AM", e);
+        throw new YarnException(e);
+      }
+    }
+  }
+
+  /**
+   * Helper method for client calling ApplicationMasterProtocol.allocate that
+   * handles re-register if RM fails over.
+   *
+   * @param request allocate request
+   * @param rmProxy RM proxy
+   * @param registerRequest the register request for re-register
+   * @param appId application id
+   * @return allocate response
+   * @throws YarnException if RM call fails
+   * @throws IOException if RM call fails
+   */
+  public static AllocateResponse allocateWithReRegister(AllocateRequest request,
+      ApplicationMasterProtocol rmProxy,
+      RegisterApplicationMasterRequest registerRequest, ApplicationId appId)
+      throws YarnException, IOException {
+    try {
+      return rmProxy.allocate(request);
+    } catch (ApplicationMasterNotRegisteredException e) {
+      handleNotRegisteredExceptionAndReRegister(appId, rmProxy,
+          registerRequest);
+      // reset responseId after re-register
+      request.setResponseId(0);
+      // retry allocate
+      return allocateWithReRegister(request, rmProxy, registerRequest, appId);
+    }
+  }
+
+  /**
+   * Helper method for client calling
+   * ApplicationMasterProtocol.finishApplicationMaster that handles re-register
+   * if RM fails over.
+   *
+   * @param request finishApplicationMaster request
+   * @param rmProxy RM proxy
+   * @param registerRequest the register request for re-register
+   * @param appId application id
+   * @return finishApplicationMaster response
+   * @throws YarnException if RM call fails
+   * @throws IOException if RM call fails
+   */
+  public static FinishApplicationMasterResponse finishAMWithReRegister(
+      FinishApplicationMasterRequest request, ApplicationMasterProtocol rmProxy,
+      RegisterApplicationMasterRequest registerRequest, ApplicationId appId)
+      throws YarnException, IOException {
+    try {
+      return rmProxy.finishApplicationMaster(request);
+    } catch (ApplicationMasterNotRegisteredException ex) {
+      handleNotRegisteredExceptionAndReRegister(appId, rmProxy,
+          registerRequest);
+      // retry finishAM after re-register
+      return finishAMWithReRegister(request, rmProxy, registerRequest, appId);
+    }
+  }
+
+  /**
+   * Create a proxy for the specified protocol.
+   *
+   * @param configuration Configuration to generate {@link ClientRMProxy}
+   * @param protocol Protocol for the proxy
+   * @param user the user on whose behalf the proxy is being created
+   * @param token the auth token to use for connection
+   * @param <T> Type information of the proxy
+   * @return Proxy to the RM
+   * @throws IOException on failure
+   */
+  @Public
+  @Unstable
+  public static <T> T createRMProxy(final Configuration configuration,
+      final Class<T> protocol, UserGroupInformation user,
+      final Token<? extends TokenIdentifier> token) throws IOException {
+    try {
+      String rmClusterId = configuration.get(YarnConfiguration.RM_CLUSTER_ID,
+          YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
+      LOG.info("Creating RMProxy to RM {} for protocol {} for user {}",
+          rmClusterId, protocol.getSimpleName(), user);
+      if (token != null) {
+        // preserve the token service sent by the RM when adding the token
+        // to ensure we replace the previous token setup by the RM.
+        // Afterwards we can update the service address for the RPC layer.
+        // Same as YarnServerSecurityUtils.updateAMRMToken()
+        user.addToken(token);
+        token.setService(ClientRMProxy.getAMRMTokenService(configuration));
+        setAuthModeInConf(configuration);
+      }
+      final T proxyConnection = user.doAs(new PrivilegedExceptionAction<T>() {
+        @Override
+        public T run() throws Exception {
+          return ClientRMProxy.createRMProxy(configuration, protocol);
+        }
+      });
+      return proxyConnection;
+
+    } catch (InterruptedException e) {
+      throw new YarnRuntimeException(e);
+    }
+  }
+
+  private static void setAuthModeInConf(Configuration conf) {
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        SaslRpcServer.AuthMethod.TOKEN.toString());
+  }
+
+  public static void addToOutstandingSchedulingRequests(
+      Collection<SchedulingRequest> requests,
+      Map<Set<String>, List<SchedulingRequest>> outstandingSchedRequests) {
+    for (SchedulingRequest req : requests) {
+      List<SchedulingRequest> schedulingRequests = outstandingSchedRequests
+          .computeIfAbsent(req.getAllocationTags(), x -> new LinkedList<>());
+      SchedulingRequest matchingReq = null;
+      for (SchedulingRequest schedReq : schedulingRequests) {
+        if (isMatchingSchedulingRequests(req, schedReq)) {
+          matchingReq = schedReq;
+          break;
+        }
+      }
+      if (matchingReq != null) {
+        matchingReq.getResourceSizing()
+            .setNumAllocations(req.getResourceSizing().getNumAllocations());
+      } else {
+        schedulingRequests.add(req);
+      }
+    }
+  }
+
+  public static boolean isMatchingSchedulingRequests(
+      SchedulingRequest schedReq1, SchedulingRequest schedReq2) {
+    return schedReq1.getPriority().equals(schedReq2.getPriority()) &&
+        schedReq1.getExecutionType().getExecutionType().equals(
+            schedReq2.getExecutionType().getExecutionType()) &&
+        schedReq1.getAllocationRequestId() ==
+            schedReq2.getAllocationRequestId();
+  }
+
+  public static void removeFromOutstandingSchedulingRequests(
+      Collection<Container> containers,
+      Map<Set<String>, List<SchedulingRequest>> outstandingSchedRequests) {
+    if (containers == null || containers.isEmpty()) {
+      return;
+    }
+    for (Container container : containers) {
+      if (container.getAllocationTags() != null
+          && !container.getAllocationTags().isEmpty()) {
+        List<SchedulingRequest> schedReqs =
+            outstandingSchedRequests.get(container.getAllocationTags());
+        if (schedReqs != null && !schedReqs.isEmpty()) {
+          Iterator<SchedulingRequest> iter = schedReqs.iterator();
+          while (iter.hasNext()) {
+            SchedulingRequest schedReq = iter.next();
+            if (schedReq.getPriority().equals(container.getPriority())
+                && schedReq.getAllocationRequestId() == container
+                    .getAllocationRequestId()) {
+              int numAllocations =
+                  schedReq.getResourceSizing().getNumAllocations();
+              numAllocations--;
+              if (numAllocations == 0) {
+                iter.remove();
+              } else {
+                schedReq.getResourceSizing().setNumAllocations(numAllocations);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
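
Both AMRMClientImpl and (below) AMRMClientRelayer use the two static helpers above as the single source of truth for scheduling-request bookkeeping. A small usage sketch, with the class and method names illustrative only:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.client.AMRMClientUtils;

    class OutstandingSchedSketch {

      // allocation tags -> pending SchedulingRequests carrying those tags
      private final Map<Set<String>, List<SchedulingRequest>> outstanding =
          new HashMap<>();

      void onSend(List<SchedulingRequest> sent) {
        // Merge by (priority, execution type, allocationRequestId); a match
        // has its numAllocations overridden instead of being duplicated.
        AMRMClientUtils.addToOutstandingSchedulingRequests(sent, outstanding);
      }

      void onAllocated(List<Container> allocated) {
        // Each tagged container decrements the matching request's
        // numAllocations; the request is dropped once the count hits zero.
        AMRMClientUtils.removeFromOutstandingSchedulingRequests(allocated,
            outstanding);
      }
    }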

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
new file mode 100644
index 0000000..c216ace
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
@@ -0,0 +1,364 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSet;
+import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSetKey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A component that sits between AMRMClient(Impl) and the YARN RM. It
+ * remembers pending requests, similar to AMRMClient, and handles RM re-sync
+ * automatically without propagating the re-sync exception back to
+ * AMRMClient.
+ */
+public class AMRMClientRelayer extends AbstractService
+    implements ApplicationMasterProtocol {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AMRMClientRelayer.class);
+
+  private ApplicationMasterProtocol rmClient;
+
+  /**
+   * The original registration request that was sent by the AM. This instance is
+   * reused to register/re-register with all the sub-cluster RMs.
+   */
+  private RegisterApplicationMasterRequest amRegistrationRequest;
+
+  /**
+   * Similar to AMRMClientImpl, all data structures below have two versions:
+   *
+   * The remote ones are all the pending requests that the RM has not
+   * fulfilled yet. Whenever the RM fails over, we re-register and then fully
+   * re-send all of these pending requests.
+   *
+   * The non-remote ones are the requests that the RM has not received yet.
+   * When the RM throws a non-fail-over exception back, the request is
+   * considered not received by the RM. We merge it with new requests and
+   * re-send it in the next heartbeat.
+   */
+  private Map<ResourceRequestSetKey, ResourceRequestSet> remotePendingAsks =
+      new HashMap<>();
+  /**
+   * Same as AMRMClientImpl, we need to use a custom comparator that does not
+   * look at ResourceRequest.getNumContainers() here. TreeSet allows a custom
+   * comparator.
+   */
+  private Set<ResourceRequest> ask =
+      new TreeSet<>(new ResourceRequest.ResourceRequestComparator());
+
+  private Set<ContainerId> remotePendingRelease = new HashSet<>();
+  private Set<ContainerId> release = new HashSet<>();
+
+  private Set<String> remoteBlacklistedNodes = new HashSet<>();
+  private Set<String> blacklistAdditions = new HashSet<>();
+  private Set<String> blacklistRemovals = new HashSet<>();
+
+  private Map<ContainerId, UpdateContainerRequest> remotePendingChange =
+      new HashMap<>();
+  private Map<ContainerId, UpdateContainerRequest> change = new HashMap<>();
+
+  private Map<Set<String>, List<SchedulingRequest>> remotePendingSchedRequest =
+      new HashMap<>();
+  private List<SchedulingRequest> schedulingRequest = new ArrayList<>();
+
+  public AMRMClientRelayer() {
+    super(AMRMClientRelayer.class.getName());
+  }
+
+  public AMRMClientRelayer(ApplicationMasterProtocol rmClient) {
+    this();
+    this.rmClient = rmClient;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    final YarnConfiguration conf = new YarnConfiguration(getConfig());
+    try {
+      if (this.rmClient == null) {
+        this.rmClient =
+            ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+      }
+    } catch (IOException e) {
+      throw new YarnRuntimeException(e);
+    }
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (this.rmClient != null) {
+      RPC.stopProxy(this.rmClient);
+    }
+    super.serviceStop();
+  }
+
+  @Override
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+      RegisterApplicationMasterRequest request)
+      throws YarnException, IOException {
+    this.amRegistrationRequest = request;
+    return this.rmClient.registerApplicationMaster(request);
+  }
+
+  @Override
+  public FinishApplicationMasterResponse finishApplicationMaster(
+      FinishApplicationMasterRequest request)
+      throws YarnException, IOException {
+    try {
+      return this.rmClient.finishApplicationMaster(request);
+    } catch (ApplicationMasterNotRegisteredException e) {
+      LOG.warn("Out of sync with ResourceManager, hence resyncing.");
+      // re register with RM
+      registerApplicationMaster(this.amRegistrationRequest);
+      return finishApplicationMaster(request);
+    }
+  }
+
+  @Override
+  public AllocateResponse allocate(AllocateRequest allocateRequest)
+      throws YarnException, IOException {
+    AllocateResponse allocateResponse = null;
+    try {
+      synchronized (this) {
+        // update the data structures first
+        addNewAsks(allocateRequest.getAskList());
+
+        if (allocateRequest.getReleaseList() != null) {
+          this.remotePendingRelease.addAll(allocateRequest.getReleaseList());
+          this.release.addAll(allocateRequest.getReleaseList());
+        }
+
+        if (allocateRequest.getResourceBlacklistRequest() != null) {
+          if (allocateRequest.getResourceBlacklistRequest()
+              .getBlacklistAdditions() != null) {
+            this.remoteBlacklistedNodes.addAll(allocateRequest
+                .getResourceBlacklistRequest().getBlacklistAdditions());
+            this.blacklistAdditions.addAll(allocateRequest
+                .getResourceBlacklistRequest().getBlacklistAdditions());
+          }
+          if (allocateRequest.getResourceBlacklistRequest()
+              .getBlacklistRemovals() != null) {
+            this.remoteBlacklistedNodes.removeAll(allocateRequest
+                .getResourceBlacklistRequest().getBlacklistRemovals());
+            this.blacklistRemovals.addAll(allocateRequest
+                .getResourceBlacklistRequest().getBlacklistRemovals());
+          }
+        }
+
+        if (allocateRequest.getUpdateRequests() != null) {
+          for (UpdateContainerRequest update : allocateRequest
+              .getUpdateRequests()) {
+            this.remotePendingChange.put(update.getContainerId(), update);
+            this.change.put(update.getContainerId(), update);
+          }
+        }
+
+        if (allocateRequest.getSchedulingRequests() != null) {
+          AMRMClientUtils.addToOutstandingSchedulingRequests(
+              allocateRequest.getSchedulingRequests(),
+              this.remotePendingSchedRequest);
+          this.schedulingRequest
+              .addAll(allocateRequest.getSchedulingRequests());
+        }
+
+        ArrayList<ResourceRequest> askList = new ArrayList<>(ask.size());
+        for (ResourceRequest r : ask) {
+          // create a copy of ResourceRequest as we might change it while the
+          // RPC layer is using it to send info across
+          askList.add(ResourceRequest.newBuilder().priority(r.getPriority())
+              .resourceName(r.getResourceName()).capability(r.getCapability())
+              .numContainers(r.getNumContainers())
+              .relaxLocality(r.getRelaxLocality())
+              .nodeLabelExpression(r.getNodeLabelExpression())
+              .executionTypeRequest(r.getExecutionTypeRequest())
+              .allocationRequestId(r.getAllocationRequestId()).build());
+        }
+
+        allocateRequest = AllocateRequest.newBuilder()
+            .responseId(allocateRequest.getResponseId())
+            .progress(allocateRequest.getProgress()).askList(askList)
+            .releaseList(new ArrayList<>(this.release))
+            .resourceBlacklistRequest(ResourceBlacklistRequest.newInstance(
+                new ArrayList<>(this.blacklistAdditions),
+                new ArrayList<>(this.blacklistRemovals)))
+            .updateRequests(new ArrayList<>(this.change.values()))
+            .schedulingRequests(new ArrayList<>(this.schedulingRequest))
+            .build();
+      }
+
+      // Do the actual allocate call
+      try {
+        allocateResponse = this.rmClient.allocate(allocateRequest);
+      } catch (ApplicationMasterNotRegisteredException e) {
+        LOG.warn("ApplicationMaster is out of sync with ResourceManager,"
+            + " hence resyncing.");
+
+        synchronized (this) {
+          // Add all remotePending data into to-send data structures
+          for (ResourceRequestSet requestSet : this.remotePendingAsks
+              .values()) {
+            for (ResourceRequest rr : requestSet.getRRs()) {
+              addResourceRequestToAsk(rr);
+            }
+          }
+          this.release.addAll(this.remotePendingRelease);
+          this.blacklistAdditions.addAll(this.remoteBlacklistedNodes);
+          this.change.putAll(this.remotePendingChange);
+          for (List<SchedulingRequest> reqs : this.remotePendingSchedRequest
+              .values()) {
+            this.schedulingRequest.addAll(reqs);
+          }
+        }
+
+        // re register with RM, then retry allocate recursively
+        registerApplicationMaster(this.amRegistrationRequest);
+        return allocate(allocateRequest);
+      }
+
+      synchronized (this) {
+        // Process the allocate response from RM
+        if (allocateResponse.getCompletedContainersStatuses() != null) {
+          for (ContainerStatus container : allocateResponse
+              .getCompletedContainersStatuses()) {
+            this.remotePendingRelease.remove(container.getContainerId());
+            this.remotePendingChange.remove(container.getContainerId());
+          }
+        }
+
+        if (allocateResponse.getUpdatedContainers() != null) {
+          for (UpdatedContainer updatedContainer : allocateResponse
+              .getUpdatedContainers()) {
+            this.remotePendingChange
+                .remove(updatedContainer.getContainer().getId());
+          }
+        }
+
+        AMRMClientUtils.removeFromOutstandingSchedulingRequests(
+            allocateResponse.getAllocatedContainers(),
+            this.remotePendingSchedRequest);
+        AMRMClientUtils.removeFromOutstandingSchedulingRequests(
+            allocateResponse.getContainersFromPreviousAttempts(),
+            this.remotePendingSchedRequest);
+      }
+
+    } finally {
+      synchronized (this) {
+        /*
+         * If allocateResponse is null, an exception happened and the RM did
+         * not accept the request. Don't clear any data structures, so that
+         * they will be re-sent next time.
+         *
+         * Otherwise the request was accepted by the RM and we are safe to
+         * clear these.
+         */
+        if (allocateResponse != null) {
+          this.ask.clear();
+          this.release.clear();
+
+          this.blacklistAdditions.clear();
+          this.blacklistRemovals.clear();
+
+          this.change.clear();
+          this.schedulingRequest.clear();
+        }
+      }
+    }
+    return allocateResponse;
+  }
+
+  private void addNewAsks(List<ResourceRequest> asks) throws YarnException {
+    Set<ResourceRequestSetKey> touchedKeys = new HashSet<>();
+    for (ResourceRequest rr : asks) {
+      addResourceRequestToAsk(rr);
+
+      ResourceRequestSetKey key = new ResourceRequestSetKey(rr);
+      touchedKeys.add(key);
+
+      ResourceRequestSet askSet = this.remotePendingAsks.get(key);
+      if (askSet == null) {
+        askSet = new ResourceRequestSet(key);
+        this.remotePendingAsks.put(key, askSet);
+      }
+      askSet.addAndOverrideRR(rr);
+    }
+
+    // Clean up: drop request sets whose container count dropped to zero
+    for (ResourceRequestSetKey key : touchedKeys) {
+      ResourceRequestSet askSet = this.remotePendingAsks.get(key);
+      if (askSet.getNumContainers() == 0) {
+        this.remotePendingAsks.remove(key);
+      } else {
+        // Remove zero-container RRs at non-ANY resource names
+        askSet.cleanupZeroNonAnyRR();
+      }
+    }
+  }
+
+  private void addResourceRequestToAsk(ResourceRequest remoteRequest) {
+    // The ResourceRequestComparator doesn't look at container count when
+    // comparing. So we need to make sure the new RR override the old if any
+    this.ask.remove(remoteRequest);
+    this.ask.add(remoteRequest);
+  }
+
+  @VisibleForTesting
+  protected Map<ResourceRequestSetKey, ResourceRequestSet>
+      getRemotePendingAsks() {
+    return this.remotePendingAsks;
+  }
+
+}
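
Because AMRMClientRelayer itself implements ApplicationMasterProtocol, callers such as FederationInterceptor can treat it as a drop-in RM proxy that hides failover. A minimal usage sketch, assuming only what the class above shows; the wrapper class and method names are illustrative:

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.AMRMClientRelayer;

    class RelayerSketch {

      static AllocateResponse heartbeatOnce(
          RegisterApplicationMasterRequest reg, AllocateRequest heartbeat)
          throws Exception {
        AMRMClientRelayer relayer = new AMRMClientRelayer();
        relayer.init(new YarnConfiguration());
        relayer.start();  // builds the real RM proxy since none was injected
        try {
          relayer.registerApplicationMaster(reg);
          // On RM failover the relayer re-registers with the stored request
          // and re-sends all remotely-pending state before retrying.
          return relayer.allocate(heartbeat);
        } finally {
          relayer.stop();
        }
      }
    }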

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
index 3931f2b..91924da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
@@ -27,12 +27,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.RMFailoverProxyProvider;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
-import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/package-info.java
new file mode 100644
index 0000000..6289500
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java
new file mode 100644
index 0000000..b1e6b6e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSet.java
@@ -0,0 +1,206 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.scheduler;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * A set of resource requests of the same scheduler key
+ * {@link ResourceRequestSetKey}.
+ */
+public class ResourceRequestSet {
+
+  private ResourceRequestSetKey key;
+  private int numContainers;
+  // ResourceName -> RR
+  private Map<String, ResourceRequest> asks;
+
+  /**
+   * Create an empty set with the given key.
+   *
+   * @param key the key of the request set
+   * @throws YarnException if fails
+   */
+  public ResourceRequestSet(ResourceRequestSetKey key) throws YarnException {
+    this.key = key;
+    // leave it at zero for now, as if it were a cancel
+    this.numContainers = 0;
+    this.asks = new HashMap<>();
+  }
+
+  /**
+   * Create a shallow copy of the request set.
+   *
+   * @param other the set to copy from
+   */
+  public ResourceRequestSet(ResourceRequestSet other) {
+    this.key = other.key;
+    this.numContainers = other.numContainers;
+    this.asks = new HashMap<>();
+    // The assumption is that the RR objects should not be modified without
+    // making a copy
+    this.asks.putAll(other.asks);
+  }
+
+  /**
+   * Add a {@link ResourceRequest} into the requestSet. If there's already an RR
+   * with the same resource name, override it and update accordingly.
+   *
+   * @param ask the new {@link ResourceRequest}
+   * @throws YarnException if the ask's key does not match this set's key
+   */
+  public void addAndOverrideRR(ResourceRequest ask) throws YarnException {
+    if (!this.key.equals(new ResourceRequestSetKey(ask))) {
+      throw new YarnException(
+          "None compatible asks: \n" + ask + "\n" + this.key);
+    }
+
+    // Override directly if exists
+    this.asks.put(ask.getResourceName(), ask);
+
+    if (this.key.getExeType().equals(ExecutionType.GUARANTEED)) {
+      // For a GUARANTEED (G) requestSet, update numContainers only from ANY RR
+      if (ask.getResourceName().equals(ResourceRequest.ANY)) {
+        this.numContainers = ask.getNumContainers();
+      }
+    } else {
+      // The assumption for OPPORTUNISTIC (O) asks is that all RRs in a
+      // requestSet have the same numContainers, so take the last RR's value
+      this.numContainers = ask.getNumContainers();
+    }
+    if (this.numContainers < 0) {
+      throw new YarnException("numContainers becomes " + this.numContainers
+          + " when adding ask " + ask + "\n requestSet: " + toString());
+    }
+  }
+
+  /**
+   * Merge a requestSet into this one.
+   *
+   * @param requestSet the requestSet to merge
+   * @throws YarnException if any request in the set is incompatible
+   */
+  public void addAndOverrideRRSet(ResourceRequestSet requestSet)
+      throws YarnException {
+    if (requestSet == null) {
+      return;
+    }
+    for (ResourceRequest rr : requestSet.getRRs()) {
+      addAndOverrideRR(rr);
+    }
+  }
+
+  /**
+   * Remove all zero-container, non-ANY ResourceRequests from the set. This
+   * cleanup is needed to keep the requestSet from growing too big.
+   */
+  public void cleanupZeroNonAnyRR() {
+    Iterator<Entry<String, ResourceRequest>> iter =
+        this.asks.entrySet().iterator();
+    while (iter.hasNext()) {
+      Entry<String, ResourceRequest> entry = iter.next();
+      if (entry.getKey().equals(ResourceRequest.ANY)) {
+        // Do not delete ANY RR
+        continue;
+      }
+      if (entry.getValue().getNumContainers() == 0) {
+        iter.remove();
+      }
+    }
+  }
+
+  public Map<String, ResourceRequest> getAsks() {
+    return this.asks;
+  }
+
+  public Collection<ResourceRequest> getRRs() {
+    return this.asks.values();
+  }
+
+  public int getNumContainers() {
+    return this.numContainers;
+  }
+
+  /**
+   * Force set the # of containers to ask for this requestSet to a given value.
+   *
+   * @param newValue the new # of containers value
+   * @throws YarnException if this is a cancel requestSet or no ANY RR exists
+   */
+  public void setNumContainers(int newValue) throws YarnException {
+    if (this.numContainers == 0) {
+      throw new YarnException("should not set numContainers to " + newValue
+          + " for a cancel requestSet: " + toString());
+    }
+
+    // Clone the ResourceRequest object whenever we need to change it
+    int oldValue = this.numContainers;
+    this.numContainers = newValue;
+    if (this.key.getExeType().equals(ExecutionType.OPPORTUNISTIC)) {
+      // The assumption for OPPORTUNISTIC (O) asks is that all RRs in a
+      // requestSet have the same numContainers value
+      Map<String, ResourceRequest> newAsks = new HashMap<>();
+      for (ResourceRequest rr : this.asks.values()) {
+        ResourceRequest clone = cloneResourceRequest(rr);
+        clone.setNumContainers(newValue);
+        newAsks.put(clone.getResourceName(), clone);
+      }
+      this.asks = newAsks;
+    } else {
+      ResourceRequest rr = this.asks.get(ResourceRequest.ANY);
+      if (rr == null) {
+        throw new YarnException(
+            "No ANY RR found in requestSet with numContainers=" + oldValue);
+      }
+      ResourceRequest clone = cloneResourceRequest(rr);
+      clone.setNumContainers(newValue);
+      this.asks.put(ResourceRequest.ANY, clone);
+    }
+  }
+
+  private ResourceRequest cloneResourceRequest(ResourceRequest rr) {
+    return ResourceRequest.newBuilder().priority(rr.getPriority())
+        .resourceName(rr.getResourceName()).capability(rr.getCapability())
+        .numContainers(rr.getNumContainers())
+        .relaxLocality(rr.getRelaxLocality())
+        .nodeLabelExpression(rr.getNodeLabelExpression())
+        .executionTypeRequest(rr.getExecutionTypeRequest())
+        .allocationRequestId(rr.getAllocationRequestId()).build();
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("{" + this.key.toString());
+    for (Entry<String, ResourceRequest> entry : this.asks.entrySet()) {
+      builder.append(
+          " " + entry.getValue().getNumContainers() + ":" + entry.getKey());
+    }
+    builder.append("}");
+    return builder.toString();
+  }
+}
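
A hedged usage sketch of the new ResourceRequestSet, reusing only APIs
visible in the diff above (ResourceRequest.newBuilder(), the set's
addAndOverrideRR()); the concrete sizes and ids are illustrative:

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.exceptions.YarnException;
    import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSet;
    import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSetKey;

    public class ResourceRequestSetDemo {
      public static void main(String[] args) throws YarnException {
        // An ANY request for two 2GB/1-vcore containers.
        ResourceRequest any = ResourceRequest.newBuilder()
            .priority(Priority.newInstance(1))
            .resourceName(ResourceRequest.ANY)
            .capability(Resource.newInstance(2048, 1))
            .numContainers(2)
            .allocationRequestId(0)
            .build();

        ResourceRequestSet set =
            new ResourceRequestSet(new ResourceRequestSetKey(any));
        set.addAndOverrideRR(any);

        // For a GUARANTEED set, numContainers follows the ANY request only.
        System.out.println(set.getNumContainers()); // 2

        // Overriding the ANY request with numContainers == 0 turns the set
        // into a cancel; cleanupZeroNonAnyRR() then keeps it from bloating
        // with zeroed node/rack asks.
      }
    }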

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java
new file mode 100644
index 0000000..4db88ef
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/ResourceRequestSetKey.java
@@ -0,0 +1,133 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.scheduler;
+
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * The scheduler key for a group of {@link ResourceRequest}.
+ *
+ * TODO: once YARN-7631 adds Resource and ExecutionType into
+ * SchedulerRequestKey, we can use that directly.
+ */
+public class ResourceRequestSetKey extends SchedulerRequestKey {
+
+  // More ResourceRequest key fields on top of SchedulerRequestKey
+  private final Resource resource;
+  private final ExecutionType execType;
+
+  /**
+   * Create the key object from a {@link ResourceRequest}.
+   *
+   * @param rr Resource request object
+   * @throws YarnException if fails
+   */
+  public ResourceRequestSetKey(ResourceRequest rr) throws YarnException {
+    this(rr.getAllocationRequestId(), rr.getPriority(), rr.getCapability(),
+        ((rr.getExecutionTypeRequest() == null) ? ExecutionType.GUARANTEED
+            : rr.getExecutionTypeRequest().getExecutionType()));
+    if (rr.getPriority() == null) {
+      throw new YarnException("Null priority in RR: " + rr);
+    }
+    if (rr.getCapability() == null) {
+      throw new YarnException("Null resource in RR: " + rr);
+    }
+  }
+
+  /**
+   * Create the key object from member objects.
+   *
+   * @param allocationRequestId allocate request id of the ask
+   * @param priority the priority of the ask
+   * @param resource the resource size of the ask
+   * @param execType the execution type of the ask
+   */
+  public ResourceRequestSetKey(long allocationRequestId, Priority priority,
+      Resource resource, ExecutionType execType) {
+    super(priority, allocationRequestId, null);
+
+    if (resource == null) {
+      this.resource = Resource.newInstance(0, 0);
+    } else {
+      this.resource = resource;
+    }
+    if (execType == null) {
+      this.execType = ExecutionType.GUARANTEED;
+    } else {
+      this.execType = execType;
+    }
+  }
+
+  public Resource getResource() {
+    return this.resource;
+  }
+
+  public ExecutionType getExeType() {
+    return this.execType;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof SchedulerRequestKey)) {
+      return false;
+    }
+    if (!(obj instanceof ResourceRequestSetKey)) {
+      return super.equals(obj);
+    }
+    ResourceRequestSetKey other = (ResourceRequestSetKey) obj;
+    return super.equals(other) && this.resource.equals(other.resource)
+        && this.execType.equals(other.execType);
+  }
+
+  @Override
+  public int hashCode() {
+    return ((super.hashCode() * 37 + this.resource.hashCode()) * 41)
+        + this.execType.hashCode();
+  }
+
+  @Override
+  public int compareTo(SchedulerRequestKey other) {
+    int ret = super.compareTo(other);
+    if (ret != 0) {
+      return ret;
+    }
+    if (!(other instanceof ResourceRequestSetKey)) {
+      return ret;
+    }
+
+    ResourceRequestSetKey otherKey = (ResourceRequestSetKey) other;
+    ret = this.resource.compareTo(otherKey.resource);
+    if (ret != 0) {
+      return ret;
+    }
+    return this.execType.compareTo(otherKey.execType);
+  }
+
+  @Override
+  public String toString() {
+    return "[id:" + getAllocationRequestId() + " p:"
+        + getPriority().getPriority()
+        + (this.execType.equals(ExecutionType.GUARANTEED) ? " G"
+            : " O" + " r:" + this.resource + "]");
+  }
+}
\ No newline at end of file
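
To make the key semantics concrete: two asks that agree on allocation id and
priority but differ in capability now land in different request sets, which
plain SchedulerRequestKey could not distinguish. A small sketch (values
illustrative):

    import org.apache.hadoop.yarn.api.records.ExecutionType;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSetKey;

    public class KeyDemo {
      public static void main(String[] args) {
        Priority p = Priority.newInstance(1);
        ResourceRequestSetKey small = new ResourceRequestSetKey(
            0L, p, Resource.newInstance(1024, 1), ExecutionType.GUARANTEED);
        ResourceRequestSetKey large = new ResourceRequestSetKey(
            0L, p, Resource.newInstance(2048, 1), ExecutionType.GUARANTEED);

        // Same id and priority, different capability: distinct keys, so the
        // asks are tracked in separate ResourceRequestSets.
        System.out.println(small.equals(large));         // false
        System.out.println(small.compareTo(large) != 0); // true
      }
    }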

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
index 0fce083..c3b08d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
  * Composite key for outstanding scheduler requests for any schedulable entity.
  * Currently it includes {@link Priority}.
  */
-public final class SchedulerRequestKey implements
+public class SchedulerRequestKey implements
     Comparable<SchedulerRequestKey> {
 
   private final Priority priority;
@@ -73,8 +73,6 @@ public final class SchedulerRequestKey implements
         container.getAllocationRequestId(), null);
   }
 
-
-
   public SchedulerRequestKey(Priority priority, long allocationRequestId,
       ContainerId containerToUpdate) {
     this.priority = priority;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
index 677c4e6..02eef29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java
@@ -47,9 +47,9 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
-import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.apache.hadoop.yarn.util.AsyncCallback;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
index 3f4a110..10985e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
@@ -56,13 +56,13 @@ import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
-import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 import org.apache.hadoop.yarn.util.AsyncCallback;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
deleted file mode 100644
index 37e2b5e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.utils;
-
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.client.ClientRMProxy;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
-import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Utility class for AMRMClient.
- */
-@Private
-public final class AMRMClientUtils {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AMRMClientUtils.class);
-
-  public static final String APP_ALREADY_REGISTERED_MESSAGE =
-      "Application Master is already registered : ";
-
-  private AMRMClientUtils() {
-  }
-
-  /**
-   * Handle ApplicationNotRegistered exception and re-register.
-   *
-   * @param appId application Id
-   * @param rmProxy RM proxy instance
-   * @param registerRequest the AM re-register request
-   * @throws YarnException if re-register fails
-   */
-  public static void handleNotRegisteredExceptionAndReRegister(
-      ApplicationId appId, ApplicationMasterProtocol rmProxy,
-      RegisterApplicationMasterRequest registerRequest) throws YarnException {
-    LOG.info("App attempt {} not registered, most likely due to RM failover. "
-        + " Trying to re-register.", appId);
-    try {
-      rmProxy.registerApplicationMaster(registerRequest);
-    } catch (Exception e) {
-      if (e instanceof InvalidApplicationMasterRequestException
-          && e.getMessage().contains(APP_ALREADY_REGISTERED_MESSAGE)) {
-        LOG.info("Concurrent thread successfully registered, moving on.");
-      } else {
-        LOG.error("Error trying to re-register AM", e);
-        throw new YarnException(e);
-      }
-    }
-  }
-
-  /**
-   * Helper method for client calling ApplicationMasterProtocol.allocate that
-   * handles re-register if RM fails over.
-   *
-   * @param request allocate request
-   * @param rmProxy RM proxy
-   * @param registerRequest the register request for re-register
-   * @param appId application id
-   * @return allocate response
-   * @throws YarnException if RM call fails
-   * @throws IOException if RM call fails
-   */
-  public static AllocateResponse allocateWithReRegister(AllocateRequest request,
-      ApplicationMasterProtocol rmProxy,
-      RegisterApplicationMasterRequest registerRequest, ApplicationId appId)
-      throws YarnException, IOException {
-    try {
-      return rmProxy.allocate(request);
-    } catch (ApplicationMasterNotRegisteredException e) {
-      handleNotRegisteredExceptionAndReRegister(appId, rmProxy,
-          registerRequest);
-      // reset responseId after re-register
-      request.setResponseId(0);
-      // retry allocate
-      return allocateWithReRegister(request, rmProxy, registerRequest, appId);
-    }
-  }
-
-  /**
-   * Helper method for client calling
-   * ApplicationMasterProtocol.finishApplicationMaster that handles re-register
-   * if RM fails over.
-   *
-   * @param request finishApplicationMaster request
-   * @param rmProxy RM proxy
-   * @param registerRequest the register request for re-register
-   * @param appId application id
-   * @return finishApplicationMaster response
-   * @throws YarnException if RM call fails
-   * @throws IOException if RM call fails
-   */
-  public static FinishApplicationMasterResponse finishAMWithReRegister(
-      FinishApplicationMasterRequest request, ApplicationMasterProtocol rmProxy,
-      RegisterApplicationMasterRequest registerRequest, ApplicationId appId)
-      throws YarnException, IOException {
-    try {
-      return rmProxy.finishApplicationMaster(request);
-    } catch (ApplicationMasterNotRegisteredException ex) {
-      handleNotRegisteredExceptionAndReRegister(appId, rmProxy,
-          registerRequest);
-      // retry finishAM after re-register
-      return finishAMWithReRegister(request, rmProxy, registerRequest, appId);
-    }
-  }
-
-  /**
-   * Create a proxy for the specified protocol.
-   *
-   * @param configuration Configuration to generate {@link ClientRMProxy}
-   * @param protocol Protocol for the proxy
-   * @param user the user on whose behalf the proxy is being created
-   * @param token the auth token to use for connection
-   * @param <T> Type information of the proxy
-   * @return Proxy to the RM
-   * @throws IOException on failure
-   */
-  @Public
-  @Unstable
-  public static <T> T createRMProxy(final Configuration configuration,
-      final Class<T> protocol, UserGroupInformation user,
-      final Token<? extends TokenIdentifier> token) throws IOException {
-    try {
-      String rmClusterId = configuration.get(YarnConfiguration.RM_CLUSTER_ID,
-          YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
-      LOG.info("Creating RMProxy to RM {} for protocol {} for user {}",
-          rmClusterId, protocol.getSimpleName(), user);
-      if (token != null) {
-        // preserve the token service sent by the RM when adding the token
-        // to ensure we replace the previous token setup by the RM.
-        // Afterwards we can update the service address for the RPC layer.
-        // Same as YarnServerSecurityUtils.updateAMRMToken()
-        user.addToken(token);
-        token.setService(ClientRMProxy.getAMRMTokenService(configuration));
-        setAuthModeInConf(configuration);
-      }
-      final T proxyConnection = user.doAs(new PrivilegedExceptionAction<T>() {
-        @Override
-        public T run() throws Exception {
-          return ClientRMProxy.createRMProxy(configuration, protocol);
-        }
-      });
-      return proxyConnection;
-
-    } catch (InterruptedException e) {
-      throw new YarnRuntimeException(e);
-    }
-  }
-
-  private static void setAuthModeInConf(Configuration conf) {
-    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        SaslRpcServer.AuthMethod.TOKEN.toString());
-  }
-}
\ No newline at end of file
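
Note the file is moved rather than rewritten: the same helpers now live in
org.apache.hadoop.yarn.client.AMRMClientUtils, as the updated imports above
show. The core pattern they implement (catch
ApplicationMasterNotRegisteredException, re-register, reset the responseId,
retry) can be sketched iteratively; this is a simplified illustration, not
the moved class itself, and it omits the tolerance for a concurrent
"already registered" failure that the real helper has:

    import java.io.IOException;
    import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
    import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    public final class AllocateRetrySketch {
      private AllocateRetrySketch() {
      }

      // On RM failover the AM looks unregistered to the new RM, so
      // re-register and retry the heartbeat with responseId reset to 0.
      public static AllocateResponse allocateWithRetry(
          AllocateRequest request, ApplicationMasterProtocol rm,
          RegisterApplicationMasterRequest registerRequest)
          throws YarnException, IOException {
        while (true) {
          try {
            return rm.allocate(request);
          } catch (ApplicationMasterNotRegisteredException e) {
            rm.registerApplicationMaster(registerRequest);
            // A fresh registration starts a new responseId sequence.
            request.setResponseId(0);
          }
        }
      }
    }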

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index 15e1cea..23cd3e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -126,6 +126,7 @@ import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
@@ -158,7 +159,6 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequ
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
-import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/TestAMRMClientRelayer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/TestAMRMClientRelayer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/TestAMRMClientRelayer.java
new file mode 100644
index 0000000..22bb1f9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/TestAMRMClientRelayer.java
@@ -0,0 +1,275 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSet;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit test for AMRMClientRelayer.
+ */
+public class TestAMRMClientRelayer {
+
+  /**
+   * Mocked ApplicationMasterService in RM.
+   */
+  public static class MockApplicationMasterService
+      implements ApplicationMasterProtocol {
+
+    // Whether this mock RM will throw a failover exception upon the next
+    // heartbeat from the AM
+    private boolean failover = false;
+    private List<ResourceRequest> lastAsk;
+    private List<ContainerId> lastRelease;
+    private List<String> lastBlacklistAdditions;
+    private List<String> lastBlacklistRemovals;
+
+    @Override
+    public RegisterApplicationMasterResponse registerApplicationMaster(
+        RegisterApplicationMasterRequest request)
+        throws YarnException, IOException {
+      return null;
+    }
+
+    @Override
+    public FinishApplicationMasterResponse finishApplicationMaster(
+        FinishApplicationMasterRequest request)
+        throws YarnException, IOException {
+      if (this.failover) {
+        this.failover = false;
+        throw new ApplicationMasterNotRegisteredException("Mock RM restarted");
+      }
+      return null;
+    }
+
+    @Override
+    public AllocateResponse allocate(AllocateRequest request)
+        throws YarnException, IOException {
+      if (this.failover) {
+        this.failover = false;
+        throw new ApplicationMasterNotRegisteredException("Mock RM restarted");
+      }
+      this.lastAsk = request.getAskList();
+      this.lastRelease = request.getReleaseList();
+      this.lastBlacklistAdditions =
+          request.getResourceBlacklistRequest().getBlacklistAdditions();
+      this.lastBlacklistRemovals =
+          request.getResourceBlacklistRequest().getBlacklistRemovals();
+      return AllocateResponse.newInstance(0, null, null,
+          new ArrayList<NodeReport>(), Resource.newInstance(0, 0), null, 0,
+          null, null);
+    }
+
+    public void setFailoverFlag() {
+      this.failover = true;
+    }
+  }
+
+  private Configuration conf;
+  private MockApplicationMasterService mockAMS;
+  private AMRMClientRelayer relayer;
+
+  // Buffer of asks that will be sent to RM in the next AM heartbeat
+  private List<ResourceRequest> asks = new ArrayList<>();
+  private List<ContainerId> releases = new ArrayList<>();
+  private List<String> blacklistAdditions = new ArrayList<>();
+  private List<String> blacklistRemoval = new ArrayList<>();
+
+  @Before
+  public void setup() throws YarnException, IOException {
+    this.conf = new Configuration();
+
+    this.mockAMS = new MockApplicationMasterService();
+    this.relayer = new AMRMClientRelayer(this.mockAMS);
+
+    this.relayer.init(conf);
+    this.relayer.start();
+
+    this.relayer.registerApplicationMaster(
+        RegisterApplicationMasterRequest.newInstance("", 0, ""));
+
+    clearAllocateRequestLists();
+  }
+
+  private void assertAsksAndReleases(int expectedAsk, int expectedRelease) {
+    Assert.assertEquals(expectedAsk, this.mockAMS.lastAsk.size());
+    Assert.assertEquals(expectedRelease, this.mockAMS.lastRelease.size());
+  }
+
+  private void assertBlacklistAdditionsAndRemovals(int expectedAdditions,
+      int expectedRemovals) {
+    Assert.assertEquals(expectedAdditions,
+        this.mockAMS.lastBlacklistAdditions.size());
+    Assert.assertEquals(expectedRemovals,
+        this.mockAMS.lastBlacklistRemovals.size());
+  }
+
+  private AllocateRequest getAllocateRequest() {
+    // Need to create a new one every time, because the protobuf impl
+    // copies the lists rather than referencing them directly
+    return AllocateRequest.newInstance(0, 0, asks, releases,
+        ResourceBlacklistRequest.newInstance(blacklistAdditions,
+            blacklistRemoval));
+  }
+
+  private void clearAllocateRequestLists() {
+    this.asks.clear();
+    this.releases.clear();
+    this.blacklistAdditions.clear();
+    this.blacklistRemoval.clear();
+  }
+
+  private static ContainerId createContainerId(int id) {
+    return ContainerId.newContainerId(
+        ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1),
+        id);
+  }
+
+  protected ResourceRequest createResourceRequest(long id, String resource,
+      int memory, int vCores, int priority, ExecutionType execType,
+      int containers) {
+    ResourceRequest req = Records.newRecord(ResourceRequest.class);
+    req.setAllocationRequestId(id);
+    req.setResourceName(resource);
+    req.setCapability(Resource.newInstance(memory, vCores));
+    req.setPriority(Priority.newInstance(priority));
+    req.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(execType));
+    req.setNumContainers(containers);
+    return req;
+  }
+
+  /**
+   * Test the proper handling of removal/cancel of resource requests.
+   */
+  @Test
+  public void testResourceRequestCleanup() throws YarnException, IOException {
+    // Ask for two containers, one with location preference
+    this.asks.add(createResourceRequest(0, "node", 2048, 1, 1,
+        ExecutionType.GUARANTEED, 1));
+    this.asks.add(createResourceRequest(0, "rack", 2048, 1, 1,
+        ExecutionType.GUARANTEED, 1));
+    this.asks.add(createResourceRequest(0, ResourceRequest.ANY, 2048, 1, 1,
+        ExecutionType.GUARANTEED, 2));
+    this.relayer.allocate(getAllocateRequest());
+
+    assertAsksAndReleases(3, 0);
+    Assert.assertEquals(1, this.relayer.getRemotePendingAsks().size());
+    ResourceRequestSet set =
+        this.relayer.getRemotePendingAsks().values().iterator().next();
+    Assert.assertEquals(3, set.getAsks().size());
+    clearAllocateRequestLists();
+
+    // Cancel one ask
+    this.asks.add(createResourceRequest(0, "node", 2048, 1, 1,
+        ExecutionType.GUARANTEED, 0));
+    this.asks.add(createResourceRequest(0, ResourceRequest.ANY, 2048, 1, 1,
+        ExecutionType.GUARANTEED, 1));
+    this.relayer.allocate(getAllocateRequest());
+
+    assertAsksAndReleases(2, 0);
+    Assert.assertEquals(1, relayer.getRemotePendingAsks().size());
+    set = this.relayer.getRemotePendingAsks().values().iterator().next();
+    Assert.assertEquals(2, set.getAsks().size());
+    clearAllocateRequestLists();
+
+    // Cancel the other ask, the pending askSet should be removed
+    this.asks.add(createResourceRequest(0, ResourceRequest.ANY, 2048, 1, 1,
+        ExecutionType.GUARANTEED, 0));
+    this.relayer.allocate(AllocateRequest.newInstance(0, 0, asks, null, null));
+
+    assertAsksAndReleases(1, 0);
+    Assert.assertEquals(0, this.relayer.getRemotePendingAsks().size());
+  }
+
+  /**
+   * Test the full pending resend after RM fails over.
+   */
+  @Test
+  public void testResendRequestsOnRMRestart()
+      throws YarnException, IOException {
+    ContainerId c1 = createContainerId(1);
+    ContainerId c2 = createContainerId(2);
+    ContainerId c3 = createContainerId(3);
+
+    // Ask for two containers, one with location preference
+    this.asks.add(createResourceRequest(0, "node1", 2048, 1, 1,
+        ExecutionType.GUARANTEED, 1));
+    this.asks.add(createResourceRequest(0, "rack", 2048, 1, 1,
+        ExecutionType.GUARANTEED, 1));
+    this.asks.add(createResourceRequest(0, ResourceRequest.ANY, 2048, 1, 1,
+        ExecutionType.GUARANTEED, 2));
+
+    this.releases.add(c1);
+    this.blacklistAdditions.add("node1");
+    this.blacklistRemoval.add("node0");
+
+    // 1. a fully loaded request
+    this.relayer.allocate(getAllocateRequest());
+    assertAsksAndReleases(3, 1);
+    assertBlacklistAdditionsAndRemovals(1, 1);
+    clearAllocateRequestLists();
+
+    // 2. empty request
+    this.relayer.allocate(getAllocateRequest());
+    assertAsksAndReleases(0, 0);
+    assertBlacklistAdditionsAndRemovals(0, 0);
+    clearAllocateRequestLists();
+
+    // Set RM restart and failover flag
+    this.mockAMS.setFailoverFlag();
+
+    // More requests
+    this.blacklistAdditions.add("node2");
+    this.releases.add(c2);
+    this.relayer.allocate(getAllocateRequest());
+
+    // verify pending requests are fully re-sent
+    assertAsksAndReleases(3, 2);
+    assertBlacklistAdditionsAndRemovals(2, 0);
+    clearAllocateRequestLists();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
index 9a53a50..5740749 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -78,7 +79,6 @@ import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.utils.FederationRegistryClient;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
 import org.apache.hadoop.yarn.server.uam.UnmanagedAMPoolManager;
-import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 import org.apache.hadoop.yarn.util.AsyncCallback;
 import org.apache.hadoop.yarn.util.ConverterUtils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159bffc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index ae28879..7dac2cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.client.AMRMClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
@@ -73,7 +74,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.proces
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
-import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 




[32/50] [abbrv] hadoop git commit: YARN-8248. Job hangs when a job requests a resource that its queue does not have. (Szilard Nemeth via Haibo Chen)

Posted by ar...@apache.org.
YARN-8248. Job hangs when a job requests a resource that its queue does not have. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f48fec83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f48fec83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f48fec83

Branch: refs/heads/HDDS-48
Commit: f48fec83d0f2d1a781a141ad7216463c5526321f
Parents: 3d2d9db
Author: Haibo Chen <ha...@apache.org>
Authored: Mon May 21 08:00:21 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Mon May 21 08:10:41 2018 -0700

----------------------------------------------------------------------
 .../scheduler/SchedulerUtils.java               | 159 ++++++++++----
 .../scheduler/fair/FSAppAttempt.java            |  10 +-
 .../scheduler/fair/FSParentQueue.java           |   3 +
 .../scheduler/fair/FairScheduler.java           | 115 +++++++++--
 .../scheduler/fair/FairSchedulerTestBase.java   |  64 ++++--
 .../scheduler/fair/TestFairScheduler.java       | 205 +++++++++++++++++++
 6 files changed, 481 insertions(+), 75 deletions(-)
----------------------------------------------------------------------
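
The fix rejects asks for resource types that the queue's maximum allocation
zeroes out, instead of queueing them forever. The predicate at the heart of
the SchedulerUtils change below can be sketched standalone with plain longs
in place of YARN's ResourceInformation (names here are hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class ZeroResourceCheckDemo {
      // Resource types requested with a positive amount but capped at zero
      // by the queue; mirrors validateResourceRequestsAgainstQueueMaxResource.
      static Map<String, Long> invalidAsks(Map<String, Long> requested,
          Map<String, Long> queueMax) {
        Map<String, Long> invalid = new HashMap<>();
        for (Map.Entry<String, Long> ask : requested.entrySet()) {
          Long max = queueMax.get(ask.getKey());
          if (max != null && max == 0L && ask.getValue() > 0L) {
            invalid.put(ask.getKey(), ask.getValue());
          }
        }
        return invalid;
      }

      public static void main(String[] args) {
        Map<String, Long> queueMax = new HashMap<>();
        queueMax.put("memory-mb", 8192L);
        queueMax.put("vcores", 4L);
        queueMax.put("gpu", 0L); // this queue has no GPUs at all

        Map<String, Long> ask = new HashMap<>();
        ask.put("memory-mb", 2048L);
        ask.put("gpu", 1L);

        // Prints {gpu=1}: the ask can never be satisfied, so the scheduler
        // now rejects it up front instead of letting the job hang.
        System.out.println(invalidAsks(ask, queueMax));
      }
    }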


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f48fec83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 9b3c20a..7de250d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -18,9 +18,14 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -40,6 +45,8 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions
+        .SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AccessType;
@@ -61,12 +68,37 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 @Unstable
 public class SchedulerUtils {
 
+  /**
+   * This class contains invalid resource information along with its
+   * resource request.
+   */
+  public static class MaxResourceValidationResult {
+    private ResourceRequest resourceRequest;
+    private List<ResourceInformation> invalidResources;
+
+    MaxResourceValidationResult(ResourceRequest resourceRequest,
+        List<ResourceInformation> invalidResources) {
+      this.resourceRequest = resourceRequest;
+      this.invalidResources = invalidResources;
+    }
+
+    public boolean isValid() {
+      return invalidResources.isEmpty();
+    }
+
+    @Override
+    public String toString() {
+      return "MaxResourceValidationResult{" + "resourceRequest="
+          + resourceRequest + ", invalidResources=" + invalidResources + '}';
+    }
+  }
+
   private static final Log LOG = LogFactory.getLog(SchedulerUtils.class);
 
-  private static final RecordFactory recordFactory = 
+  private static final RecordFactory recordFactory =
       RecordFactoryProvider.getRecordFactory(null);
 
-  public static final String RELEASED_CONTAINER = 
+  public static final String RELEASED_CONTAINER =
       "Container released by application";
 
   public static final String UPDATED_CONTAINER =
@@ -325,6 +357,22 @@ public class SchedulerUtils {
     }
   }
 
+  private static Map<String, ResourceInformation> getZeroResources(
+      Resource resource) {
+    Map<String, ResourceInformation> resourceInformations = Maps.newHashMap();
+    int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+
+    for (int i = 0; i < maxLength; i++) {
+      ResourceInformation resourceInformation =
+          resource.getResourceInformation(i);
+      if (resourceInformation.getValue() == 0L) {
+        resourceInformations.put(resourceInformation.getName(),
+            resourceInformation);
+      }
+    }
+    return resourceInformations;
+  }
+
   @Private
   @VisibleForTesting
   static void checkResourceRequestAgainstAvailableResource(Resource reqResource,
@@ -339,47 +387,86 @@ public class SchedulerUtils {
             reqResourceName);
       }
 
-      final ResourceInformation availableRI =
-          availableResource.getResourceInformation(reqResourceName);
+      boolean valid = checkResource(requestedRI, availableResource);
+      if (!valid) {
+        throwInvalidResourceException(reqResource, availableResource,
+            reqResourceName);
+      }
+    }
+  }
 
-      long requestedResourceValue = requestedRI.getValue();
-      long availableResourceValue = availableRI.getValue();
-      int unitsRelation = UnitsConversionUtil
-          .compareUnits(requestedRI.getUnits(), availableRI.getUnits());
+  public static MaxResourceValidationResult
+      validateResourceRequestsAgainstQueueMaxResource(
+      ResourceRequest resReq, Resource availableResource)
+      throws SchedulerInvalidResoureRequestException {
+    final Resource reqResource = resReq.getCapability();
+    Map<String, ResourceInformation> resourcesWithZeroAmount =
+        getZeroResources(availableResource);
+
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Resources with zero amount: "
+          + Arrays.toString(resourcesWithZeroAmount.entrySet().toArray()));
+    }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Requested resource information: " + requestedRI);
-        LOG.debug("Available resource information: " + availableRI);
-        LOG.debug("Relation of units: " + unitsRelation);
-      }
+    List<ResourceInformation> invalidResources = Lists.newArrayList();
+    for (int i = 0; i < ResourceUtils.getNumberOfKnownResourceTypes(); i++) {
+      final ResourceInformation requestedRI =
+          reqResource.getResourceInformation(i);
+      final String reqResourceName = requestedRI.getName();
 
-      // requested resource unit is less than available resource unit
-      // e.g. requestedUnit: "m", availableUnit: "K")
-      if (unitsRelation < 0) {
-        availableResourceValue =
-            UnitsConversionUtil.convert(availableRI.getUnits(),
-                requestedRI.getUnits(), availableRI.getValue());
-
-        // requested resource unit is greater than available resource unit
-        // e.g. requestedUnit: "G", availableUnit: "M")
-      } else if (unitsRelation > 0) {
-        requestedResourceValue =
-            UnitsConversionUtil.convert(requestedRI.getUnits(),
-                availableRI.getUnits(), requestedRI.getValue());
+      if (resourcesWithZeroAmount.containsKey(reqResourceName)
+          && requestedRI.getValue() > 0) {
+        invalidResources.add(requestedRI);
       }
+    }
+    return new MaxResourceValidationResult(resReq, invalidResources);
+  }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Requested resource value after conversion: " +
-                requestedResourceValue);
-        LOG.info("Available resource value after conversion: " +
-                availableResourceValue);
-      }
+  /**
+   * Checks a requested ResourceInformation against the available Resource.
+   * @param requestedRI the requested resource information
+   * @param availableResource the available resource to check against
+   * @return true if request is valid, false otherwise.
+   */
+  private static boolean checkResource(
+      ResourceInformation requestedRI, Resource availableResource) {
+    final ResourceInformation availableRI =
+        availableResource.getResourceInformation(requestedRI.getName());
 
-      if (requestedResourceValue > availableResourceValue) {
-        throwInvalidResourceException(reqResource, availableResource,
-            reqResourceName);
-      }
+    long requestedResourceValue = requestedRI.getValue();
+    long availableResourceValue = availableRI.getValue();
+    int unitsRelation = UnitsConversionUtil.compareUnits(requestedRI.getUnits(),
+        availableRI.getUnits());
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Requested resource information: " + requestedRI);
+      LOG.debug("Available resource information: " + availableRI);
+      LOG.debug("Relation of units: " + unitsRelation);
+    }
+
+    // requested resource unit is less than available resource unit
+    // e.g. requestedUnit: "m", availableUnit: "K")
+    if (unitsRelation < 0) {
+      availableResourceValue =
+          UnitsConversionUtil.convert(availableRI.getUnits(),
+              requestedRI.getUnits(), availableRI.getValue());
+
+      // requested resource unit is greater than available resource unit
+      // e.g. requestedUnit: "G", availableUnit: "M")
+    } else if (unitsRelation > 0) {
+      requestedResourceValue =
+          UnitsConversionUtil.convert(requestedRI.getUnits(),
+              availableRI.getUnits(), requestedRI.getValue());
     }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Requested resource value after conversion: "
+          + requestedResourceValue);
+      LOG.info("Available resource value after conversion: "
+          + availableResourceValue);
+    }
+
+    return requestedResourceValue <= availableResourceValue;
   }
 
   private static void throwInvalidResourceException(Resource reqResource,
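
A worked instance of the unit handling in checkResource() above, using the
same UnitsConversionUtil calls; the concrete quantities (a 2 Gi ask against
a 1024 Mi cap) are illustrative:

    import org.apache.hadoop.yarn.util.UnitsConversionUtil;

    public class UnitCheckDemo {
      public static void main(String[] args) {
        long requested = 2;
        String reqUnit = "Gi";
        long available = 1024;
        String availUnit = "Mi";

        int rel = UnitsConversionUtil.compareUnits(reqUnit, availUnit);
        if (rel > 0) {
          // Requested unit is larger: convert the request down to "Mi".
          requested = UnitsConversionUtil.convert(reqUnit, availUnit, requested);
        } else if (rel < 0) {
          // Requested unit is smaller: convert the availability instead.
          available = UnitsConversionUtil.convert(availUnit, reqUnit, available);
        }

        // 2048 Mi <= 1024 Mi is false: the request is rejected.
        System.out.println(requested <= available);
      }
    }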

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f48fec83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 0305702..281aded 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -459,7 +459,6 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
       // Add it to allContainers list.
       addToNewlyAllocatedContainers(node, rmContainer);
       liveContainers.put(container.getId(), rmContainer);
-
       // Update consumption and track allocations
       ContainerRequest containerRequest = appSchedulingInfo.allocate(
           type, node, schedulerKey, container);
@@ -867,6 +866,12 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
         if (reserved) {
           unreserve(schedulerKey, node);
         }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format(
+              "Resource ask %s fits in available node resources %s, " +
+                      "but no container was allocated",
+              capability, available));
+        }
         return Resources.none();
       }
 
@@ -1096,7 +1101,8 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     } else if (!getQueue().fitsInMaxShare(resource)) {
       // The requested container must fit in queue maximum share
       updateAMDiagnosticMsg(resource,
-          " exceeds current queue or its parents maximum resource allowed).");
+          " exceeds current queue or its parents maximum resource allowed). " +
+                  "Max share of queue: " + getQueue().getMaxShare());
 
       ret = false;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f48fec83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index a8e53fc..26c5630 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -182,6 +182,9 @@ public class FSParentQueue extends FSQueue {
 
     // If this queue is over its limit, reject
     if (!assignContainerPreCheck(node)) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Assign container precheck on node " + node + " failed");
+      }
       return assigned;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f48fec83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 1f85814..1c4bd51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
@@ -42,6 +43,8 @@ import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions
+        .SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
@@ -73,6 +76,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils.MaxResourceValidationResult;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
@@ -449,10 +453,7 @@ public class FairScheduler extends
       String message =
           "Reject application " + applicationId + " submitted by user " + user
               + " with an empty queue name.";
-      LOG.info(message);
-      rmContext.getDispatcher().getEventHandler().handle(
-          new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
-              message));
+      rejectApplicationWithMessage(applicationId, message);
       return;
     }
 
@@ -461,10 +462,7 @@ public class FairScheduler extends
           "Reject application " + applicationId + " submitted by user " + user
               + " with an illegal queue name " + queueName + ". "
               + "The queue name cannot start/end with period.";
-      LOG.info(message);
-      rmContext.getDispatcher().getEventHandler().handle(
-          new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
-              message));
+      rejectApplicationWithMessage(applicationId, message);
       return;
     }
 
@@ -476,6 +474,31 @@ public class FairScheduler extends
         return;
       }
 
+      if (rmApp != null && rmApp.getAMResourceRequests() != null) {
+        // Resources.fitsIn would always return false when queueMaxShare is 0
+        // for any resource, but using Resources.fitsIn alone is not enough
+        // as it would also return false when the requested resource exceeds
+        // a max resource that is not zero,
+        // e.g. requested vCores = 2, max vCores = 1.
+        // With this check, we only reject those applications where the
+        // requested resource is greater than 0 and the queue has 0
+        // of that resource.
+        List<MaxResourceValidationResult> invalidAMResourceRequests =
+                validateResourceRequests(rmApp.getAMResourceRequests(), queue);
+
+        if (!invalidAMResourceRequests.isEmpty()) {
+          String msg = String.format(
+                  "Cannot submit application %s to queue %s because "
+                          + "it has zero amount of resource for a requested "
+                          + "resource! Invalid requested AM resources: %s, "
+                          + "maximum queue resources: %s",
+                  applicationId, queue.getName(),
+                  invalidAMResourceRequests, queue.getMaxShare());
+          rejectApplicationWithMessage(applicationId, msg);
+          return;
+        }
+      }
+
       // Enforce ACLs
       UserGroupInformation userUgi = UserGroupInformation.createRemoteUser(
           user);
@@ -485,9 +508,7 @@ public class FairScheduler extends
         String msg = "User " + userUgi.getUserName()
             + " cannot submit applications to queue " + queue.getName()
             + "(requested queuename is " + queueName + ")";
-        LOG.info(msg);
-        rmContext.getDispatcher().getEventHandler().handle(
-            new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, msg));
+        rejectApplicationWithMessage(applicationId, msg);
         return;
       }
 
@@ -604,10 +625,7 @@ public class FairScheduler extends
     }
 
     if (appRejectMsg != null && rmApp != null) {
-      LOG.error(appRejectMsg);
-      rmContext.getDispatcher().getEventHandler().handle(
-          new RMAppEvent(rmApp.getApplicationId(),
-              RMAppEventType.APP_REJECTED, appRejectMsg));
+      rejectApplicationWithMessage(rmApp.getApplicationId(), appRejectMsg);
       return null;
     }
 
@@ -834,7 +852,6 @@ public class FairScheduler extends
       List<ResourceRequest> ask, List<SchedulingRequest> schedulingRequests,
       List<ContainerId> release, List<String> blacklistAdditions,
       List<String> blacklistRemovals, ContainerUpdates updateRequests) {
-
     // Make sure this application exists
     FSAppAttempt application = getSchedulerApp(appAttemptId);
     if (application == null) {
@@ -854,6 +871,24 @@ public class FairScheduler extends
       return EMPTY_ALLOCATION;
     }
 
+    ApplicationId applicationId = application.getApplicationId();
+    FSLeafQueue queue = application.getQueue();
+    List<MaxResourceValidationResult> invalidAsks =
+            validateResourceRequests(ask, queue);
+
+    // We need to fail fast here if any invalid ask is detected.
+    // If the exception were thrown later, this could be problematic as
+    // tokens and promoted / demoted containers would be lost, because
+    // the scheduler would clear them right away and the AM
+    // would never get this information.
+    if (!invalidAsks.isEmpty()) {
+      throw new SchedulerInvalidResoureRequestException(String.format(
+              "Resource request is invalid for application %s because queue %s "
+                      + "has 0 amount of resource for a resource type! "
+                      + "Validation result: %s",
+              applicationId, queue.getName(), invalidAsks));
+    }
+
     // Handle promotions and demotions
     handleContainerUpdates(application, updateRequests);
 
@@ -912,6 +947,7 @@ public class FairScheduler extends
 
     Resource headroom = application.getHeadroom();
     application.setApplicationHeadroomForMetrics(headroom);
+
     return new Allocation(newlyAllocatedContainers, headroom,
         preemptionContainerIds, null, null,
         application.pullUpdatedNMTokens(), null, null,
@@ -920,6 +956,34 @@ public class FairScheduler extends
         application.pullPreviousAttemptContainers());
   }
 
+  private List<MaxResourceValidationResult> validateResourceRequests(
+      List<ResourceRequest> requests, FSLeafQueue queue) {
+    List<MaxResourceValidationResult> validationResults = Lists.newArrayList();
+
+    for (ResourceRequest resourceRequest : requests) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Validating resource request: " + resourceRequest);
+      }
+
+      MaxResourceValidationResult validationResult =
+              SchedulerUtils.validateResourceRequestsAgainstQueueMaxResource(
+                      resourceRequest, queue.getMaxShare());
+      if (!validationResult.isValid()) {
+        validationResults.add(validationResult);
+        LOG.warn(String.format("Queue %s cannot handle resource request" +
+                        "because it has zero available amount of resource " +
+                        "for a requested resource type, " +
+                        "so the resource request is ignored!"
+                        + " Requested resources: %s, " +
+                        "maximum queue resources: %s",
+                queue.getName(), resourceRequest.getCapability(),
+                queue.getMaxShare()));
+      }
+    }
+
+    return validationResults;
+  }
+
   @Override
   protected void nodeUpdate(RMNode nm) {
     try {
@@ -1060,9 +1124,14 @@ public class FairScheduler extends
         Resource assignedResource = Resources.clone(Resources.none());
         Resource maxResourcesToAssign = Resources.multiply(
             node.getUnallocatedResource(), 0.5f);
+
         while (node.getReservedContainer() == null) {
           Resource assignment = queueMgr.getRootQueue().assignContainer(node);
+
           if (assignment.equals(Resources.none())) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("No container is allocated on node " + node);
+            }
             break;
           }
 
@@ -1254,9 +1323,7 @@ public class FairScheduler extends
           String message = "Application " + applicationId
               + " submitted to a reservation which is not yet "
               + "currently active: " + resQName;
-          this.rmContext.getDispatcher().getEventHandler().handle(
-              new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
-                  message));
+          rejectApplicationWithMessage(applicationId, message);
           return null;
         }
         if (!queue.getParent().getQueueName().equals(queueName)) {
@@ -1264,9 +1331,7 @@ public class FairScheduler extends
               "Application: " + applicationId + " submitted to a reservation "
                   + resQName + " which does not belong to the specified queue: "
                   + queueName;
-          this.rmContext.getDispatcher().getEventHandler().handle(
-              new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
-                  message));
+          rejectApplicationWithMessage(applicationId, message);
           return null;
         }
         // use the reservation queue to run the app
@@ -1279,7 +1344,13 @@ public class FairScheduler extends
     } finally {
       readLock.unlock();
     }
+  }
 
+  private void rejectApplicationWithMessage(ApplicationId applicationId,
+          String msg) {
+    LOG.info(msg);
+    rmContext.getDispatcher().getEventHandler().handle(new RMAppEvent(
+            applicationId, RMAppEventType.APP_REJECTED, msg));
   }
 
   private String getDefaultQueueForPlanQueue(String queueName) {

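The validation wired in above reduces to a single predicate per resource type:
an ask is rejected only when the queue's maximum share has zero of a resource
and the request asks for more than zero of it. A minimal sketch with plain
longs (names are illustrative, not part of the patch):

    // requested comes from ResourceRequest.getCapability(), queueMax from
    // queue.getMaxShare(), both for the same resource type.
    static boolean isInvalidAsk(long requested, long queueMax) {
      return queueMax == 0 && requested > 0;
    }

    // requested vCores = 2, max vCores = 1 -> valid here (normal scheduling)
    // requested vCores = 1, max vCores = 0 -> invalid: APP_REJECTED at submit
    //     time, SchedulerInvalidResoureRequestException at allocate() time
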
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f48fec83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index b998564..3ac3849 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -57,6 +58,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import java.io.File;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 
 public class FairSchedulerTestBase {
@@ -163,37 +165,43 @@ public class FairSchedulerTestBase {
   protected ApplicationAttemptId createSchedulingRequest(
       int memory, int vcores, String queueId, String userId, int numContainers,
       int priority) {
-    ApplicationAttemptId id = createAppAttemptId(this.APP_ID++,
-        this.ATTEMPT_ID++);
+    ResourceRequest request = createResourceRequest(memory, vcores,
+            ResourceRequest.ANY, priority, numContainers, true);
+    return createSchedulingRequest(Lists.newArrayList(request), queueId,
+            userId);
+  }
+
+  protected ApplicationAttemptId createSchedulingRequest(
+      Collection<ResourceRequest> requests, String queueId, String userId) {
+    ApplicationAttemptId id =
+        createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
     scheduler.addApplication(id.getApplicationId(), queueId, userId, false);
     // This conditional is for testAclSubmitApplication where app is rejected
     // and no app is added.
-    if (scheduler.getSchedulerApplications().
-        containsKey(id.getApplicationId())) {
+    if (scheduler.getSchedulerApplications()
+        .containsKey(id.getApplicationId())) {
       scheduler.addApplicationAttempt(id, false, false);
     }
-    List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
-    ResourceRequest request = createResourceRequest(memory, vcores,
-        ResourceRequest.ANY, priority, numContainers, true);
-    ask.add(request);
+
+    List<ResourceRequest> ask = new ArrayList<>(requests);
 
     RMApp rmApp = mock(RMApp.class);
     RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
     when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
     when(rmAppAttempt.getRMAppAttemptMetrics()).thenReturn(
-        new RMAppAttemptMetrics(id, resourceManager.getRMContext()));
+            new RMAppAttemptMetrics(id, resourceManager.getRMContext()));
     ApplicationSubmissionContext submissionContext =
-        mock(ApplicationSubmissionContext.class);
+            mock(ApplicationSubmissionContext.class);
     when(submissionContext.getUnmanagedAM()).thenReturn(false);
     when(rmAppAttempt.getSubmissionContext()).thenReturn(submissionContext);
     when(rmApp.getApplicationSubmissionContext()).thenReturn(submissionContext);
     Container container = mock(Container.class);
     when(rmAppAttempt.getMasterContainer()).thenReturn(container);
     resourceManager.getRMContext().getRMApps()
-        .put(id.getApplicationId(), rmApp);
+            .put(id.getApplicationId(), rmApp);
 
-    scheduler.allocate(id, ask, null, new ArrayList<ContainerId>(),
-        null, null, NULL_UPDATE_REQUESTS);
+    scheduler.allocate(id, ask, null, new ArrayList<>(),
+            null, null, NULL_UPDATE_REQUESTS);
     scheduler.update();
     return id;
   }
@@ -252,13 +260,36 @@ public class FairSchedulerTestBase {
 
   protected void createApplicationWithAMResource(ApplicationAttemptId attId,
       String queue, String user, Resource amResource) {
+    createApplicationWithAMResourceInternal(attId, queue, user, amResource,
+        null);
+    ApplicationId appId = attId.getApplicationId();
+    addApplication(queue, user, appId);
+    addAppAttempt(attId);
+  }
+
+  protected void createApplicationWithAMResource(ApplicationAttemptId attId,
+      String queue, String user, Resource amResource,
+      List<ResourceRequest> amReqs) {
+    createApplicationWithAMResourceInternal(attId, queue, user, amResource,
+        amReqs);
+    ApplicationId appId = attId.getApplicationId();
+    addApplication(queue, user, appId);
+  }
+
+  private void createApplicationWithAMResourceInternal(
+      ApplicationAttemptId attId, String queue, String user,
+      Resource amResource, List<ResourceRequest> amReqs) {
     RMContext rmContext = resourceManager.getRMContext();
     ApplicationId appId = attId.getApplicationId();
     RMApp rmApp = new RMAppImpl(appId, rmContext, conf, null, user, null,
         ApplicationSubmissionContext.newInstance(appId, null, queue, null,
             mock(ContainerLaunchContext.class), false, false, 0, amResource,
-            null), scheduler, null, 0, null, null, null);
+            null),
+        scheduler, null, 0, null, null, amReqs);
     rmContext.getRMApps().put(appId, rmApp);
+  }
+
+  private void addApplication(String queue, String user, ApplicationId appId) {
     RMAppEvent event = new RMAppEvent(appId, RMAppEventType.START);
     resourceManager.getRMContext().getRMApps().get(appId).handle(event);
     event = new RMAppEvent(appId, RMAppEventType.APP_NEW_SAVED);
@@ -268,8 +299,11 @@ public class FairSchedulerTestBase {
     AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent(
         appId, queue, user);
     scheduler.handle(appAddedEvent);
+  }
+
+  private void addAppAttempt(ApplicationAttemptId attId) {
     AppAttemptAddedSchedulerEvent attempAddedEvent =
-        new AppAttemptAddedSchedulerEvent(attId, false);
+            new AppAttemptAddedSchedulerEvent(attId, false);
     scheduler.handle(attempAddedEvent);
   }
 

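For reference, the new Collection-based overload lets a test submit several
asks under one application attempt; a short usage sketch built from the
helpers above (values are illustrative only):

    ResourceRequest memAsk =
        createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true);
    ResourceRequest zeroAsk =
        createResourceRequest(0, 0, ResourceRequest.ANY, 1, 2, true);
    ApplicationAttemptId attemptId = createSchedulingRequest(
        Lists.newArrayList(memAsk, zeroAsk), "queueA", "user1");
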
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f48fec83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index d9c06a7..2f6c2cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -41,9 +41,11 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import javax.xml.parsers.ParserConfigurationException;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -62,6 +64,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -69,6 +72,8 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions
+        .SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
@@ -5414,4 +5419,204 @@ public class TestFairScheduler extends FairSchedulerTestBase {
             SchedulerUtils.COMPLETED_APPLICATION),
         RMContainerEventType.EXPIRE);
   }
+
+  @Test
+  public void testAppRejectedToQueueWithZeroCapacityOfVcores()
+      throws IOException {
+    testAppRejectedToQueueWithZeroCapacityOfResource(
+            ResourceInformation.VCORES_URI);
+  }
+
+  @Test
+  public void testAppRejectedToQueueWithZeroCapacityOfMemory()
+      throws IOException {
+    testAppRejectedToQueueWithZeroCapacityOfResource(
+            ResourceInformation.MEMORY_URI);
+  }
+
+  private void testAppRejectedToQueueWithZeroCapacityOfResource(String resource)
+      throws IOException {
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+    generateAllocationFileWithZeroResource(resource);
+
+    final List<Event> recordedEvents = Lists.newArrayList();
+
+    RMContext spyContext = Mockito.spy(resourceManager.getRMContext());
+    Dispatcher mockDispatcher = mock(AsyncDispatcher.class);
+    when(mockDispatcher.getEventHandler()).thenReturn((EventHandler) event -> {
+      if (event instanceof RMAppEvent) {
+        recordedEvents.add(event);
+      }
+    });
+    Mockito.doReturn(mockDispatcher).when(spyContext).getDispatcher();
+    ((AsyncDispatcher) mockDispatcher).start();
+
+    scheduler.setRMContext(spyContext);
+
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // submit app with queue name (queueA)
+    ApplicationAttemptId appAttemptId1 = createAppAttemptId(1, 1);
+
+    ResourceRequest amReqs = ResourceRequest.newBuilder()
+        .capability(Resource.newInstance(5 * GB, 3)).build();
+    createApplicationWithAMResource(appAttemptId1, "queueA", "user1",
+        Resource.newInstance(GB, 1), Lists.newArrayList(amReqs));
+    scheduler.update();
+
+    assertEquals("Exactly one APP_REJECTED event is expected", 1,
+        recordedEvents.size());
+    Event event = recordedEvents.get(0);
+    RMAppEvent rmAppEvent = (RMAppEvent) event;
+    assertEquals(RMAppEventType.APP_REJECTED, rmAppEvent.getType());
+    assertTrue("Diagnostic message does not match: " +
+                    rmAppEvent.getDiagnosticMsg(),
+            rmAppEvent.getDiagnosticMsg()
+        .matches("Cannot submit application application[\\d_]+ to queue "
+            + "root.queueA because it has zero amount of resource "
+            + "for a requested resource! " +
+                "Invalid requested AM resources: .+, "
+            + "maximum queue resources: .+"));
+  }
+
+  private void generateAllocationFileWithZeroResource(String resource)
+      throws IOException {
+    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println("<?xml version=\"1.0\"?>");
+    out.println("<allocations>");
+    out.println("<queue name=\"queueA\">");
+
+    String resources = "";
+    if (resource.equals(ResourceInformation.MEMORY_URI)) {
+      resources = "0 mb,2vcores";
+    } else if (resource.equals(ResourceInformation.VCORES_URI)) {
+      resources = "10000 mb,0vcores";
+    }
+    out.println("<minResources>" + resources + "</minResources>");
+    out.println("<maxResources>" + resources + "</maxResources>");
+    out.println("<weight>2.0</weight>");
+    out.println("</queue>");
+    out.println("<queue name=\"queueB\">");
+    out.println("<minResources>1 mb 1 vcores</minResources>");
+    out.println("<weight>0.0</weight>");
+    out.println("</queue>");
+    out.println("</allocations>");
+    out.close();
+  }
+
+  @Test
+  public void testSchedulingRejectedToQueueWithZeroCapacityOfMemory()
+      throws IOException {
+    // This request is not valid as the queue will have 0 capacity of memory
+    // and the request asks for 2048 MB
+    ResourceRequest invalidRequest =
+        createResourceRequest(2048, 2, ResourceRequest.ANY, 1, 2, true);
+
+    ResourceRequest validRequest =
+        createResourceRequest(0, 0, ResourceRequest.ANY, 1, 2, true);
+    testSchedulingRejectedToQueueZeroCapacityOfResource(
+        ResourceInformation.MEMORY_URI,
+        Lists.newArrayList(invalidRequest, validRequest));
+  }
+
+  @Test
+  public void testSchedulingAllowedToQueueWithZeroCapacityOfMemory()
+      throws IOException {
+    testSchedulingAllowedToQueueZeroCapacityOfResource(
+        ResourceInformation.MEMORY_URI, 0, 2);
+  }
+
+  @Test
+  public void testSchedulingRejectedToQueueWithZeroCapacityOfVcores()
+      throws IOException {
+    // This request is not valid as the queue will have 0 capacity of vCores
+    // and the request asks for 1 vCore
+    ResourceRequest invalidRequest =
+        createResourceRequest(0, 1, ResourceRequest.ANY, 1, 2, true);
+
+    ResourceRequest validRequest =
+        createResourceRequest(0, 0, ResourceRequest.ANY, 1, 2, true);
+
+    testSchedulingRejectedToQueueZeroCapacityOfResource(
+        ResourceInformation.VCORES_URI,
+        Lists.newArrayList(invalidRequest, validRequest));
+  }
+
+  @Test
+  public void testSchedulingAllowedToQueueWithZeroCapacityOfVcores()
+      throws IOException {
+    testSchedulingAllowedToQueueZeroCapacityOfResource(
+            ResourceInformation.VCORES_URI, 2048, 0);
+  }
+
+  private void testSchedulingRejectedToQueueZeroCapacityOfResource(
+      String resource, Collection<ResourceRequest> requests)
+      throws IOException {
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+    generateAllocationFileWithZeroResource(resource);
+
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // Add a node
+    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 2));
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+
+    try {
+      createSchedulingRequest(requests, "queueA", "user1");
+      fail("Exception is expected because the queue has zero capacity of "
+          + resource + " and requested resource capabilities are: "
+          + requests.stream().map(ResourceRequest::getCapability)
+              .collect(Collectors.toList()));
+    } catch (SchedulerInvalidResoureRequestException e) {
+      assertTrue(
+          "The thrown exception is not the expected one. Exception message: "
+              + e.getMessage(),
+          e.getMessage()
+              .matches("Resource request is invalid for application "
+                  + "application[\\d_]+ because queue root\\.queueA has 0 "
+                  + "amount of resource for a resource type! "
+                  + "Validation result:.*"));
+
+      List<ApplicationAttemptId> appsInQueue =
+          scheduler.getAppsInQueue("queueA");
+      assertEquals("Number of apps in queue 'queueA' should be one!", 1,
+          appsInQueue.size());
+
+      ApplicationAttemptId appAttemptId =
+          scheduler.getAppsInQueue("queueA").get(0);
+      assertNotNull(
+          "Scheduler app for appAttemptId " + appAttemptId
+              + " should not be null!",
+          scheduler.getSchedulerApp(appAttemptId));
+
+      FSAppAttempt schedulerApp = scheduler.getSchedulerApp(appAttemptId);
+      assertNotNull("Scheduler app queueInfo for appAttemptId " + appAttemptId
+          + " should not be null!", schedulerApp.getAppSchedulingInfo());
+
+      assertTrue("There should be no requests accepted", schedulerApp
+          .getAppSchedulingInfo().getAllResourceRequests().isEmpty());
+    }
+  }
+
+  private void testSchedulingAllowedToQueueZeroCapacityOfResource(
+          String resource, int memory, int vCores) throws IOException {
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+    generateAllocationFileWithZeroResource(resource);
+
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // Add a node
+    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 2));
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+
+    createSchedulingRequest(memory, vCores, "queueA", "user1", 1, 2);
+  }
 }




[20/50] [abbrv] hadoop git commit: HDFS-13592. TestNameNodePrunesMissingStorages#testNameNodePrunesUnreportedStorages does not shut down cluster properly. Contributed by Anbang Hu.

Posted by ar...@apache.org.
HDFS-13592. TestNameNodePrunesMissingStorages#testNameNodePrunesUnreportedStorages does not shut down cluster properly. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57b893de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57b893de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57b893de

Branch: refs/heads/HDDS-48
Commit: 57b893de3d36d20f65ee81b5cc3cfef12594b75b
Parents: 6e99686
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 18 09:36:58 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 18 09:36:58 2018 -0700

----------------------------------------------------------------------
 .../TestNameNodePrunesMissingStorages.java      | 97 +++++++++++---------
 1 file changed, 53 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57b893de/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 948a8fb..96d227d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -383,51 +383,60 @@ public class TestNameNodePrunesMissingStorages {
         .Builder(conf).numDataNodes(1)
         .storagesPerDatanode(2)
         .build();
-    // Create two files to ensure each storage has a block
-    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file1"),
-        102400, 102400, 102400, (short)1,
-        0x1BAD5EE);
-    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file2"),
-        102400, 102400, 102400, (short)1,
-        0x1BAD5EED);
-    // Get the datanode storages and data directories
-    DataNode dn = cluster.getDataNodes().get(0);
-    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
-    DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().
-        getDatanode(cluster.getDataNodes().get(0).getDatanodeUuid());
-    DatanodeStorageInfo[] dnStoragesInfosBeforeRestart =
-        dnDescriptor.getStorageInfos();
-    Collection<String> oldDirs =  new ArrayList<String>(dn.getConf().
-        getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
-    // Keep the first data directory and remove the second.
-    String newDirs = oldDirs.iterator().next();
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
-    // Restart the datanode with the new conf
-    cluster.stopDataNode(0);
-    cluster.startDataNodes(conf, 1, false, null, null);
-    dn = cluster.getDataNodes().get(0);
-    cluster.waitActive();
-    // Assert that the dnDescriptor has both the storages after restart
-    assertArrayEquals(dnStoragesInfosBeforeRestart,
-        dnDescriptor.getStorageInfos());
-    // Assert that the removed storage is marked as FAILED
-    // when DN heartbeats to the NN
-    int numFailedStoragesWithBlocks = 0;
-    DatanodeStorageInfo failedStorageInfo = null;
-    for (DatanodeStorageInfo dnStorageInfo: dnDescriptor.getStorageInfos()) {
-      if (dnStorageInfo.areBlocksOnFailedStorage()) {
-        numFailedStoragesWithBlocks++;
-        failedStorageInfo = dnStorageInfo;
+    try {
+      cluster.waitActive();
+      // Create two files to ensure each storage has a block
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file1"),
+          102400, 102400, 102400, (short)1,
+          0x1BAD5EE);
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path("file2"),
+          102400, 102400, 102400, (short)1,
+          0x1BAD5EED);
+      // Get the datanode storages and data directories
+      DataNode dn = cluster.getDataNodes().get(0);
+      BlockManager bm =
+          cluster.getNameNode().getNamesystem().getBlockManager();
+      DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().
+          getDatanode(cluster.getDataNodes().get(0).getDatanodeUuid());
+      DatanodeStorageInfo[] dnStoragesInfosBeforeRestart =
+          dnDescriptor.getStorageInfos();
+      Collection<String> oldDirs =  new ArrayList<String>(dn.getConf().
+          getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
+      // Keep the first data directory and remove the second.
+      String newDirs = oldDirs.iterator().next();
+      conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
+      // Restart the datanode with the new conf
+      cluster.stopDataNode(0);
+      cluster.startDataNodes(conf, 1, false, null, null);
+      dn = cluster.getDataNodes().get(0);
+      cluster.waitActive();
+      // Assert that the dnDescriptor has both the storages after restart
+      assertArrayEquals(dnStoragesInfosBeforeRestart,
+          dnDescriptor.getStorageInfos());
+      // Assert that the removed storage is marked as FAILED
+      // when DN heartbeats to the NN
+      int numFailedStoragesWithBlocks = 0;
+      DatanodeStorageInfo failedStorageInfo = null;
+      for (DatanodeStorageInfo dnStorageInfo: dnDescriptor.getStorageInfos()) {
+        if (dnStorageInfo.areBlocksOnFailedStorage()) {
+          numFailedStoragesWithBlocks++;
+          failedStorageInfo = dnStorageInfo;
+        }
+      }
+      assertEquals(1, numFailedStoragesWithBlocks);
+      // Heartbeat manager removes the blocks associated with this failed
+      // storage
+      bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+      assertTrue(!failedStorageInfo.areBlocksOnFailedStorage());
+      // pruneStorageMap removes the unreported storage
+      cluster.triggerHeartbeats();
+      // Assert that the unreported storage is pruned
+      assertEquals(DataNode.getStorageLocations(dn.getConf()).size(),
+          dnDescriptor.getStorageInfos().length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
       }
     }
-    assertEquals(1, numFailedStoragesWithBlocks);
-    // Heartbeat manager removes the blocks associated with this failed storage
-    bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
-    assertTrue(!failedStorageInfo.areBlocksOnFailedStorage());
-    // pruneStorageMap removes the unreported storage
-    cluster.triggerHeartbeats();
-    // Assert that the unreported storage is pruned
-    assertEquals(DataNode.getStorageLocations(dn.getConf()).size(),
-        dnDescriptor.getStorageInfos().length);
   }
 }


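The pattern applied above generalizes to any MiniDFSCluster-based test: build
the cluster, run the body inside try, and shut the cluster down in finally so
a failing assertion cannot leak it into later tests. A minimal sketch (the
test body is a placeholder):

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // ... test body: create files, restart datanodes, assert ...
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
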


[11/50] [abbrv] hadoop git commit: HDFS-13556. TestNestedEncryptionZones does not shut down cluster. Contributed by Anbang Hu.

Posted by ar...@apache.org.
HDFS-13556. TestNestedEncryptionZones does not shut down cluster. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a97a2042
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a97a2042
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a97a2042

Branch: refs/heads/HDDS-48
Commit: a97a2042f210e9db97646baad6f56064d672f447
Parents: 7c485a6
Author: Inigo Goiri <in...@apache.org>
Authored: Thu May 17 16:53:23 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu May 17 16:53:23 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/TestNestedEncryptionZones.java     | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a97a2042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java
index 59d980c..92187d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -111,6 +112,14 @@ public class TestNestedEncryptionZones {
     DFSTestUtil.createKey(NESTED_EZ_KEY, cluster, conf);
   }
 
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
   @Test(timeout = 60000)
   public void testNestedEncryptionZones() throws Exception {
     initTopEZDirAndNestedEZDir(new Path(rootDir, "topEZ"));




[22/50] [abbrv] hadoop git commit: HADOOP-15154. Abstract new method assertCapability for StreamCapabilities testing. Contributed by Zsolt Venczel.

Posted by ar...@apache.org.
HADOOP-15154. Abstract new method assertCapability for StreamCapabilities testing. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89f59113
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89f59113
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89f59113

Branch: refs/heads/HDDS-48
Commit: 89f59113927dd886f09d8fe2c05ff2cd5d1390c1
Parents: 9775ecb
Author: Xiao Chen <xi...@apache.org>
Authored: Fri May 18 11:28:48 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri May 18 11:29:20 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/crypto/TestCryptoStreams.java | 39 +++++++++++++-------
 .../hadoop/fs/contract/ContractTestUtils.java   | 29 +++++++++++++++
 2 files changed, 54 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89f59113/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
index 2172d8a..cd7391a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -44,8 +44,7 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertCapabilities;
 
 public class TestCryptoStreams extends CryptoStreamsTestBase {
   /**
@@ -419,21 +418,33 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
     // verify hasCapability returns what FakeOutputStream is set up for
     CryptoOutputStream cos =
         (CryptoOutputStream) getOutputStream(defaultBufferSize, key, iv);
-    assertTrue(cos instanceof StreamCapabilities);
-    assertTrue(cos.hasCapability(StreamCapabilities.HFLUSH));
-    assertTrue(cos.hasCapability(StreamCapabilities.HSYNC));
-    assertTrue(cos.hasCapability(StreamCapabilities.DROPBEHIND));
-    assertFalse(cos.hasCapability(StreamCapabilities.READAHEAD));
-    assertFalse(cos.hasCapability(StreamCapabilities.UNBUFFER));
+
+    assertCapabilities(cos,
+        new String[] {
+            StreamCapabilities.HFLUSH,
+            StreamCapabilities.HSYNC,
+            StreamCapabilities.DROPBEHIND
+        },
+        new String[] {
+            StreamCapabilities.READAHEAD,
+            StreamCapabilities.UNBUFFER
+        }
+    );
 
     // verify hasCapability for input stream
     CryptoInputStream cis =
         (CryptoInputStream) getInputStream(defaultBufferSize, key, iv);
-    assertTrue(cis instanceof StreamCapabilities);
-    assertTrue(cis.hasCapability(StreamCapabilities.DROPBEHIND));
-    assertTrue(cis.hasCapability(StreamCapabilities.READAHEAD));
-    assertTrue(cis.hasCapability(StreamCapabilities.UNBUFFER));
-    assertFalse(cis.hasCapability(StreamCapabilities.HFLUSH));
-    assertFalse(cis.hasCapability(StreamCapabilities.HSYNC));
+
+    assertCapabilities(cis,
+        new String[] {
+            StreamCapabilities.DROPBEHIND,
+            StreamCapabilities.READAHEAD,
+            StreamCapabilities.UNBUFFER
+        },
+        new String[] {
+            StreamCapabilities.HFLUSH,
+            StreamCapabilities.HSYNC
+        }
+    );
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89f59113/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 54d015a..38a6fb1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
 import org.junit.internal.AssumptionViolatedException;
@@ -1437,6 +1438,34 @@ public class ContractTestUtils extends Assert {
     return list;
   }
 
+  /**
+   * Custom assert to test {@link StreamCapabilities}.
+   *
+   * @param stream The stream to test for StreamCapabilities
+   * @param shouldHaveCapabilities The array of expected capabilities
+   * @param shouldNotHaveCapabilities The array of unexpected capabilities
+   */
+  public static void assertCapabilities(
+      Object stream, String[] shouldHaveCapabilities,
+      String[] shouldNotHaveCapabilities) {
+    assertTrue("Stream should be instanceof StreamCapabilities",
+        stream instanceof StreamCapabilities);
+
+    if (shouldHaveCapabilities != null) {
+      for (String shouldHaveCapability : shouldHaveCapabilities) {
+        assertTrue("Should have capability: " + shouldHaveCapability,
+            ((StreamCapabilities) stream).hasCapability(shouldHaveCapability));
+      }
+    }
+
+    if (shouldNotHaveCapabilities != null) {
+      for (String shouldNotHaveCapability : shouldNotHaveCapabilities) {
+        assertFalse("Should not have capability: " + shouldNotHaveCapability,
+            ((StreamCapabilities) stream)
+                .hasCapability(shouldNotHaveCapability));
+      }
+    }
+  }
 
   /**
    * Results of recursive directory creation/scan operations.


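Any stream test can now use the helper in the same way; a short sketch for an
arbitrary output stream (stream construction elided, capability sets chosen
purely for illustration):

    import static org.apache.hadoop.fs.contract.ContractTestUtils.assertCapabilities;

    // 'out' must implement StreamCapabilities, or the first assert fails.
    assertCapabilities(out,
        new String[] {StreamCapabilities.HFLUSH, StreamCapabilities.HSYNC},
        new String[] {StreamCapabilities.UNBUFFER});
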


[50/50] [abbrv] hadoop git commit: HDDS-74. Rename name of properties related to configuration tags. Contributed by Sandeep Nemuri.

Posted by ar...@apache.org.
HDDS-74. Rename name of properties related to configuration tags.
Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60821fb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60821fb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60821fb2

Branch: refs/heads/HDDS-48
Commit: 60821fb20ecee55735ddd0a379cb64841ccb1e2e
Parents: 481bfdb
Author: Anu Engineer <ae...@apache.org>
Authored: Tue May 22 11:38:11 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue May 22 11:38:11 2018 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java   | 2 +-
 hadoop-hdds/common/src/main/resources/ozone-default.xml          | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60821fb2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
index b8d0b24..521408b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
@@ -154,7 +154,7 @@ public class HddsConfServlet extends HttpServlet {
 
     switch (cmd) {
     case "getOzoneTags":
-      out.write(gson.toJson(config.get("ozone.system.tags").split(",")));
+      out.write(gson.toJson(config.get("ozone.tags.system").split(",")));
       break;
     case "getPropertyByTag":
       String tags = request.getParameter("tags");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60821fb2/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 648ba05..e0aca67 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1006,12 +1006,12 @@
   </property>
 
   <property>
-    <name>hadoop.custom.tags</name>
+    <name>hadoop.tags.custom</name>
     <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
 
   <property>
-    <name>ozone.system.tags</name>
+    <name>ozone.tags.system</name>
     <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
 


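Any code or script that read the old keys must move to the new names; a
minimal sketch of reading the system tags the way HddsConfServlet now does
(OzoneConfiguration is assumed to have ozone-default.xml on its classpath):

    OzoneConfiguration config = new OzoneConfiguration();
    // The old key "ozone.system.tags" now resolves to null.
    String[] systemTags = config.get("ozone.tags.system").split(",");
    for (String tag : systemTags) {
      System.out.println(tag);
    }
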


[29/50] [abbrv] hadoop git commit: HADOOP-15478. WASB: hflush() and hsync() regression. Contributed by Thomas Marquardt.

Posted by ar...@apache.org.
HADOOP-15478. WASB: hflush() and hsync() regression.
Contributed by Thomas Marquardt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba842847
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba842847
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba842847

Branch: refs/heads/HDDS-48
Commit: ba842847c94d31d3f737226d954c566b5d88656b
Parents: a23ff8d
Author: Steve Loughran <st...@apache.org>
Authored: Mon May 21 11:02:01 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon May 21 11:12:34 2018 +0100

----------------------------------------------------------------------
 .../hadoop/fs/azure/PageBlobOutputStream.java   |  13 +-
 .../fs/azure/SyncableDataOutputStream.java      |   4 -
 .../fs/azure/ITestOutputStreamSemantics.java    | 385 +++++++++++++++++++
 3 files changed, 397 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba842847/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
index b2b34f8..68ddcdf 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
@@ -376,6 +376,18 @@ final class PageBlobOutputStream extends OutputStream implements Syncable {
     outBuffer = new ByteArrayOutputStream();
   }
 
+  @VisibleForTesting
+  synchronized void waitForLastFlushCompletion() throws IOException {
+    try {
+      if (lastQueuedTask != null) {
+        lastQueuedTask.waitTillDone();
+      }
+    } catch (InterruptedException e1) {
+      // Restore the interrupted status
+      Thread.currentThread().interrupt();
+    }
+  }
+
   /**
    * Extend the page blob file if we are close to the end.
    */
@@ -554,7 +566,6 @@ final class PageBlobOutputStream extends OutputStream implements Syncable {
   }
 
   @Override
-
   public void hflush() throws IOException {
 
     // hflush is required to force data to storage, so call hsync,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba842847/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
index fc8796b..dcfff2f 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
@@ -61,8 +61,6 @@ public class SyncableDataOutputStream extends DataOutputStream
   public void hflush() throws IOException {
     if (out instanceof Syncable) {
       ((Syncable) out).hflush();
-    } else {
-      out.flush();
     }
   }
 
@@ -70,8 +68,6 @@ public class SyncableDataOutputStream extends DataOutputStream
   public void hsync() throws IOException {
     if (out instanceof Syncable) {
       ((Syncable) out).hsync();
-    } else {
-      out.flush();
     }
   }
 }

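The removed else branches are the substance of this fix: falling back to
flush() made every hflush()/hsync() call push buffered data, while for a
wrapped stream that is not Syncable the calls are meant to be no-ops. Callers
that need a durability guarantee should probe for it; a hedged caller-side
sketch (path, fs and data are placeholders, not from the patch):

    FSDataOutputStream out = fs.create(new Path("/blockblob/sample"));
    try {
      out.write(data);
      if (out.hasCapability(StreamCapabilities.HSYNC)) {
        out.hsync();  // durable on the service side
      }
      // if the stream cannot sync, only close() guarantees persistence
    } finally {
      out.close();
    }
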
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba842847/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java
new file mode 100644
index 0000000..9ac1f73
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestOutputStreamSemantics.java
@@ -0,0 +1,385 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.Random;
+
+import com.microsoft.azure.storage.blob.BlockEntry;
+import com.microsoft.azure.storage.blob.BlockListingFilter;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+
+import org.hamcrest.core.IsEqual;
+import org.hamcrest.core.IsNot;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+import static org.junit.Assume.assumeNotNull;
+
+/**
+ * Test semantics of functions flush, hflush, hsync, and close for block blobs,
+ * block blobs with compaction, and page blobs.
+ */
+public class ITestOutputStreamSemantics extends AbstractWasbTestBase {
+
+  private static final String PAGE_BLOB_DIR = "/pageblob";
+  private static final String BLOCK_BLOB_DIR = "/blockblob";
+  private static final String BLOCK_BLOB_COMPACTION_DIR = "/compaction";
+
+  private byte[] getRandomBytes() {
+    byte[] buffer = new byte[PageBlobFormatHelpers.PAGE_SIZE
+        - PageBlobFormatHelpers.PAGE_HEADER_SIZE];
+    Random rand = new Random();
+    rand.nextBytes(buffer);
+    return buffer;
+  }
+
+  private Path getBlobPathWithTestName(String parentDir) {
+    return new Path(parentDir + "/" + methodName.getMethodName());
+  }
+
+  private void validate(Path path, byte[] writeBuffer, boolean isEqual)
+      throws IOException {
+    String blobPath = path.toUri().getPath();
+    try (FSDataInputStream inputStream = fs.open(path)) {
+      byte[] readBuffer = new byte[PageBlobFormatHelpers.PAGE_SIZE
+          - PageBlobFormatHelpers.PAGE_HEADER_SIZE];
+      int numBytesRead = inputStream.read(readBuffer, 0, readBuffer.length);
+
+      if (isEqual) {
+        assertArrayEquals(
+            String.format("Bytes read do not match bytes written to %1$s",
+                blobPath),
+            writeBuffer,
+            readBuffer);
+      } else {
+        assertThat(
+            String.format("Bytes read unexpectedly match bytes written to %1$s",
+                blobPath),
+            readBuffer,
+            IsNot.not(IsEqual.equalTo(writeBuffer)));
+      }
+    }
+  }
+
+  private boolean isBlockBlobAppendStreamWrapper(FSDataOutputStream stream) {
+    return
+    ((SyncableDataOutputStream)
+        ((NativeAzureFileSystem.NativeAzureFsOutputStream)
+            stream.getWrappedStream())
+            .getOutStream())
+        .getOutStream()
+        instanceof BlockBlobAppendStream;
+  }
+
+  private boolean isPageBlobStreamWrapper(FSDataOutputStream stream) {
+    return
+        ((SyncableDataOutputStream) stream.getWrappedStream())
+        .getOutStream()
+            instanceof PageBlobOutputStream;
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Configure the page blob directories
+    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, PAGE_BLOB_DIR);
+
+    // Configure the block blob with compaction directories
+    conf.set(AzureNativeFileSystemStore.KEY_BLOCK_BLOB_WITH_COMPACTION_DIRECTORIES,
+        BLOCK_BLOB_COMPACTION_DIR);
+
+    return AzureBlobStorageTestAccount.create(
+        "",
+        EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
+        conf);
+  }
+
+  // Verify flush writes data to storage for Page Blobs
+  @Test
+  public void testPageBlobFlush() throws IOException {
+    Path path = getBlobPathWithTestName(PAGE_BLOB_DIR);
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      stream.flush();
+
+      // flush is asynchronous for Page Blob, so we need to
+      // wait for it to complete
+      SyncableDataOutputStream syncStream =
+          (SyncableDataOutputStream) stream.getWrappedStream();
+      PageBlobOutputStream pageBlobStream =
+          (PageBlobOutputStream)syncStream.getOutStream();
+      pageBlobStream.waitForLastFlushCompletion();
+
+      validate(path, buffer, true);
+    }
+  }
+
+
+  // Verify hflush writes data to storage for Page Blobs
+  @Test
+  public void testPageBlobHFlush() throws IOException {
+    Path path = getBlobPathWithTestName(PAGE_BLOB_DIR);
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isPageBlobStreamWrapper(stream));
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      stream.hflush();
+      validate(path, buffer, true);
+    }
+  }
+
+  // HSync must write data to storage for Page Blobs
+  @Test
+  public void testPageBlobHSync() throws IOException {
+    Path path = getBlobPathWithTestName(PAGE_BLOB_DIR);
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isPageBlobStreamWrapper(stream));
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      stream.hsync();
+      validate(path, buffer, true);
+    }
+  }
+
+  // Close must write data to storage for Page Blobs
+  @Test
+  public void testPageBlobClose() throws IOException {
+    Path path = getBlobPathWithTestName(PAGE_BLOB_DIR);
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isPageBlobStreamWrapper(stream));
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      stream.close();
+      validate(path, buffer, true);
+    }
+  }
+
+  // Verify flush does not write data to storage for Block Blobs
+  @Test
+  public void testBlockBlobFlush() throws Exception {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_DIR);
+    byte[] buffer = getRandomBytes();
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      for (int i = 0; i < 10; i++) {
+        stream.write(buffer);
+        stream.flush();
+      }
+    }
+    String blobPath = path.toUri().getPath();
+    // Create a blob reference to read and validate the block list
+    CloudBlockBlob blob = testAccount.getBlobReference(blobPath.substring(1));
+    // after the stream is closed, the block list should be non-empty
+    ArrayList<BlockEntry> blockList = blob.downloadBlockList(
+        BlockListingFilter.COMMITTED,
+        null, null, null);
+    assertEquals(1, blockList.size());
+  }
+
+  // Verify hflush does not write data to storage for Block Blobs
+  @Test
+  public void testBlockBlobHFlush() throws Exception {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_DIR);
+    byte[] buffer = getRandomBytes();
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      for (int i = 0; i < 10; i++) {
+        stream.write(buffer);
+        stream.hflush();
+      }
+    }
+    String blobPath = path.toUri().getPath();
+    // Create a blob reference to read and validate the block list
+    CloudBlockBlob blob = testAccount.getBlobReference(blobPath.substring(1));
+    // after the stream is closed, the block list should be non-empty
+    ArrayList<BlockEntry> blockList = blob.downloadBlockList(
+        BlockListingFilter.COMMITTED,
+        null, null, null);
+    assertEquals(1, blockList.size());
+  }
+
+  // Verify hsync does not write data to storage for Block Blobs
+  @Test
+  public void testBlockBlobHSync() throws Exception {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_DIR);
+    byte[] buffer = getRandomBytes();
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      for (int i = 0; i < 10; i++) {
+        stream.write(buffer);
+        stream.hsync();
+      }
+    }
+    String blobPath = path.toUri().getPath();
+    // Create a blob reference to read and validate the block list
+    CloudBlockBlob blob = testAccount.getBlobReference(blobPath.substring(1));
+    // after the stream is closed, the block list should be non-empty
+    ArrayList<BlockEntry> blockList = blob.downloadBlockList(
+        BlockListingFilter.COMMITTED,
+        null, null, null);
+    assertEquals(1, blockList.size());
+  }
+
+  // Close must write data to storage for Block Blobs
+  @Test
+  public void testBlockBlobClose() throws IOException {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_DIR);
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      stream.close();
+      validate(path, buffer, true);
+    }
+  }
+
+  // Verify flush writes data to storage for Block Blobs with compaction
+  @Test
+  public void testBlockBlobCompactionFlush() throws Exception {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_COMPACTION_DIR);
+    byte[] buffer = getRandomBytes();
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isBlockBlobAppendStreamWrapper(stream));
+      for (int i = 0; i < 10; i++) {
+        stream.write(buffer);
+        stream.flush();
+      }
+    }
+    String blobPath = path.toUri().getPath();
+    // Create a blob reference to read and validate the block list
+    CloudBlockBlob blob = testAccount.getBlobReference(blobPath.substring(1));
+    // after the stream is closed, the block list should be non-empty
+    ArrayList<BlockEntry> blockList = blob.downloadBlockList(
+        BlockListingFilter.COMMITTED,
+        null, null, null);
+    assertEquals(1, blockList.size());
+  }
+
+  // Verify hflush writes data to storage for Block Blobs with Compaction
+  @Test
+  public void testBlockBlobCompactionHFlush() throws Exception {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_COMPACTION_DIR);
+    byte[] buffer = getRandomBytes();
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isBlockBlobAppendStreamWrapper(stream));
+      for (int i = 0; i < 10; i++) {
+        stream.write(buffer);
+        stream.hflush();
+      }
+    }
+    String blobPath = path.toUri().getPath();
+    // Create a blob reference to read and validate the block list
+    CloudBlockBlob blob = testAccount.getBlobReference(blobPath.substring(1));
+    // after the stream is closed, the block list should be non-empty
+    ArrayList<BlockEntry> blockList = blob.downloadBlockList(
+        BlockListingFilter.COMMITTED,
+        null, null, null);
+    assertEquals(10, blockList.size());
+  }
+
+  // Verify hsync writes data to storage for Block Blobs with compaction
+  @Test
+  public void testBlockBlobCompactionHSync() throws Exception {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_COMPACTION_DIR);
+    byte[] buffer = getRandomBytes();
+
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isBlockBlobAppendStreamWrapper(stream));
+      for (int i = 0; i < 10; i++) {
+        stream.write(buffer);
+        stream.hsync();
+      }
+    }
+    String blobPath = path.toUri().getPath();
+    // Create a blob reference to read and validate the block list
+    CloudBlockBlob blob = testAccount.getBlobReference(blobPath.substring(1));
+    // after the stream is closed, the block list should be non-empty
+    ArrayList<BlockEntry> blockList = blob.downloadBlockList(
+        BlockListingFilter.COMMITTED,
+        null, null, null);
+    assertEquals(10, blockList.size());
+  }
+
+  // Close must write data to storage for Block Blobs with compaction
+  @Test
+  public void testBlockBlobCompactionClose() throws IOException {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_COMPACTION_DIR);
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isBlockBlobAppendStreamWrapper(stream));
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      stream.close();
+      validate(path, buffer, true);
+    }
+  }
+
+  // A small write does not write data to storage for Page Blobs
+  @Test
+  public void testPageBlobSmallWrite() throws IOException {
+    Path path = getBlobPathWithTestName(PAGE_BLOB_DIR);
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isPageBlobStreamWrapper(stream));
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      validate(path, buffer, false);
+    }
+  }
+
+  // A small write does not write data to storage for Block Blobs
+  @Test
+  public void testBlockBlobSmallWrite() throws IOException {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_DIR);
+    try (FSDataOutputStream stream = fs.create(path)) {
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      validate(path, buffer, false);
+    }
+  }
+
+  // A small write does not write data to storage for Block Blobs
+  // with Compaction
+  @Test
+  public void testBlockBlobCompactionSmallWrite() throws IOException {
+    Path path = getBlobPathWithTestName(BLOCK_BLOB_COMPACTION_DIR);
+    try (FSDataOutputStream stream = fs.create(path)) {
+      assertTrue(isBlockBlobAppendStreamWrapper(stream));
+      byte[] buffer = getRandomBytes();
+      stream.write(buffer);
+      validate(path, buffer, false);
+    }
+  }
+}
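
The block-list probe repeated in the tests above is what separates "durably committed" from "only buffered". A hedged helper sketch that factors it out (assumes it lives in this test class, where testAccount is provided by AbstractWasbTestBase):

    // Hedged helper sketch: counts committed blocks behind a path using the
    // same probe as the tests above. testAccount is assumed to be the field
    // provided by AbstractWasbTestBase.
    private int committedBlockCount(Path path) throws Exception {
      CloudBlockBlob blob =
          testAccount.getBlobReference(path.toUri().getPath().substring(1));
      return blob.downloadBlockList(
          BlockListingFilter.COMMITTED, null, null, null).size();
    }

Note the asymmetry the assertions encode: plain block blobs commit a single block when the stream closes (assertEquals(1, ...)) no matter how many flush/hflush/hsync calls were made, whereas compaction-enabled streams commit one block per hflush/hsync (assertEquals(10, ...)); flush() alone never commits a block.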




[34/50] [abbrv] hadoop git commit: HDDS-71. Send ContainerType to Datanode during container creation. Contributed by Bharat Viswanadham.

Posted by ar...@apache.org.
HDDS-71. Send ContainerType to Datanode during container creation. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/132a547d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/132a547d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/132a547d

Branch: refs/heads/HDDS-48
Commit: 132a547dea4081948c39c149c59d6453003fa277
Parents: 73e9120
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Mon May 21 22:57:08 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Mon May 21 23:08:20 2018 +0530

----------------------------------------------------------------------
 .../scm/storage/ContainerProtocolCalls.java     |  2 ++
 .../main/proto/DatanodeContainerProtocol.proto  |  6 ++++
 .../container/common/helpers/ContainerData.java | 36 ++++++++++++++++++++
 .../common/impl/ContainerManagerImpl.java       |  7 ++++
 4 files changed, 51 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/132a547d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 5fbf373..d3af083 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -243,6 +243,8 @@ public final class ContainerProtocolCalls  {
     ContainerProtos.ContainerData.Builder containerData = ContainerProtos
         .ContainerData.newBuilder();
     containerData.setContainerID(containerID);
+    containerData.setContainerType(ContainerProtos.ContainerType
+        .KeyValueContainer);
     createRequest.setContainerData(containerData.build());
 
     String id = client.getPipeline().getLeader().getUuidString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132a547d/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 3479866..e7e5b2b 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -225,6 +225,12 @@ message ContainerData {
   optional int64 size = 7;
   optional int64 keyCount = 8;
   optional ContainerLifeCycleState state = 9 [default = OPEN];
+  optional ContainerType containerType = 10 [default = KeyValueContainer];
+  optional string containerDBType = 11;
+}
+
+enum ContainerType {
+  KeyValueContainer = 1;
 }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132a547d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 63111c8..2a079b0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerType;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.OzoneConsts;
 
@@ -47,6 +49,8 @@ public class ContainerData {
   private long maxSize;
   private long containerID;
   private ContainerLifeCycleState state;
+  private ContainerType containerType;
+  private String containerDBType;
 
   /**
    * Constructs a  ContainerData Object.
@@ -99,9 +103,26 @@ public class ContainerData {
     if (protoData.hasSize()) {
       data.setMaxSize(protoData.getSize());
     }
+
+    if (protoData.hasContainerType()) {
+      data.setContainerType(protoData.getContainerType());
+    }
+
+    if (protoData.hasContainerDBType()) {
+      data.setContainerDBType(protoData.getContainerDBType());
+    }
+
     return data;
   }
 
+  public String getContainerDBType() {
+    return containerDBType;
+  }
+
+  public void setContainerDBType(String containerDBType) {
+    this.containerDBType = containerDBType;
+  }
+
   /**
    * Returns a ProtoBuf Message from ContainerData.
    *
@@ -141,9 +162,24 @@ public class ContainerData {
       builder.setSize(this.getMaxSize());
     }
 
+    if (this.getContainerType() != null) {
+      builder.setContainerType(containerType);
+    }
+
+    if (this.getContainerDBType() != null) {
+      builder.setContainerDBType(containerDBType);
+    }
+
     return builder.build();
   }
 
+  public void setContainerType(ContainerType containerType) {
+    this.containerType = containerType;
+  }
+
+  public ContainerType getContainerType() {
+    return this.containerType;
+  }
   /**
    * Adds metadata.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132a547d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 039b4c3..c443ace 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
@@ -400,6 +401,12 @@ public class ContainerManagerImpl implements ContainerManager {
           .toString());
       containerData.setContainerPath(containerFile.toString());
 
+      if (containerData.getContainerDBType() == null) {
+        String impl = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
+            OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
+        containerData.setContainerDBType(impl);
+      }
+
       ContainerProtos.ContainerData protoData = containerData
           .getProtoBufMessage();
       protoData.writeDelimitedTo(dos);




[24/50] [abbrv] hadoop git commit: YARN-7530. Refactored YARN service API project location. Contributed by Chandni Singh

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
new file mode 100644
index 0000000..d90ae06
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -0,0 +1,594 @@
+# Hadoop YARN REST APIs for services v1 spec in YAML
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+swagger: '2.0'
+info:
+  title: "YARN Simplified API layer for services"
+  description: |
+    Bringing a new service on YARN today is not a simple experience. The APIs of existing
+    frameworks are either too low level (native YARN), require writing new code (for frameworks with programmatic APIs)
+    or writing a complex spec (for declarative frameworks).
+
+    This simplified REST API can be used to create and manage the lifecycle of YARN services.
+    In most cases, the application owner will not be forced to make any changes to their applications.
+    This is primarily true if the application is packaged with containerization technologies like Docker.
+
+    This document describes the API specifications (aka. YarnFile) for deploying/managing
+    containerized services on YARN. The same JSON spec can be used for both REST API
+    and CLI to manage the services.
+
+  version: "1.0.0"
+  license:
+    name: Apache 2.0
+    url: http://www.apache.org/licenses/LICENSE-2.0.html
+# the domain of the service
+host: localhost
+port: 8088 (default)
+# array of all schemes that your API supports
+schemes:
+  - http
+consumes:
+  - application/json
+produces:
+  - application/json
+paths:
+  /app/v1/services/version:
+    get:
+      summary: Get current version of the API server.
+      description: Get current version of the API server.
+      responses:
+        200:
+          description: Successful request
+
+  /app/v1/services:
+    get:
+      summary: (TBD) List of services running in the cluster.
+      description: Get a list of all currently running services (response includes a minimal projection of the service info). For more details do a GET on a specific service name.
+      responses:
+        200:
+          description: An array of services
+          schema:
+            type: array
+            items:
+              $ref: '#/definitions/Service'
+        default:
+          description: Unexpected error
+          schema:
+            $ref: '#/definitions/ServiceStatus'
+    post:
+      summary: Create a service
+      description: Create a service. The request JSON is a service object with details required for creation. If the request is successful it returns 202 Accepted. A success of this API only confirms success in submission of the service creation request. There is no guarantee that the service will actually reach a RUNNING state. Resource availability and several other factors determines if the service will be deployed in the cluster. It is expected that clients would subsequently call the GET API to get details of the service and determine its state.
+      parameters:
+        - name: Service
+          in: body
+          description: Service request object
+          required: true
+          schema:
+            $ref: '#/definitions/Service'
+      responses:
+        202:
+          description: The request to create a service is accepted
+        400:
+          description: Invalid service definition provided in the request body
+        500:
+          description: Failed to create a service
+        default:
+          description: Unexpected error
+          schema:
+            $ref: '#/definitions/ServiceStatus'
+
+  /app/v1/services/{service_name}:
+    put:
+      summary: Update a service or upgrade the binary version of the components of a running service
+      description: Update the runtime properties of a service. Currently the following operations are supported - update lifetime, stop/start a service.
+                   The PUT operation is also used to orchestrate an upgrade of the service containers to a newer version of their artifacts (TBD).
+      parameters:
+        - name: service_name
+          in: path
+          description: Service name
+          required: true
+          type: string
+        - name: Service
+          in: body
+          description: The updated service definition. It can contain the updated lifetime of a service or the desired state (STOPPED/STARTED) of a service to initiate a start/stop operation against the specified service
+          required: true
+          schema:
+            $ref: '#/definitions/Service'
+      responses:
+        204:
+          description: Update or upgrade was successful
+        404:
+          description: Service does not exist
+        default:
+          description: Unexpected error
+          schema:
+            $ref: '#/definitions/ServiceStatus'
+    delete:
+      summary: Destroy a service
+      description: Destroy a service and release all resources. This API might have to return JSON data providing location of logs (TBD), etc.
+      parameters:
+        - name: service_name
+          in: path
+          description: Service name
+          required: true
+          type: string
+      responses:
+        204:
+          description: Destroy was successful
+        404:
+          description: Service does not exist
+        default:
+          description: Unexpected error
+          schema:
+            $ref: '#/definitions/ServiceStatus'
+    get:
+      summary: Get details of a service.
+      description: Return the details (including containers) of a running service
+      parameters:
+        - name: service_name
+          in: path
+          description: Service name
+          required: true
+          type: string
+      responses:
+        200:
+          description: a service object
+          schema:
+            type: object
+            items:
+              $ref: '#/definitions/Service'
+          examples:
+            service_name: logsearch
+            artifact:
+              id: logsearch:latest
+              type: docker
+        404:
+          description: Service does not exist
+        default:
+          description: Unexpected error
+          schema:
+            $ref: '#/definitions/ServiceStatus'
+  /app/v1/services/{service_name}/components/{component_name}:
+    put:
+      summary: Flex a component's number of instances.
+      description: Set a component's desired number of instances
+      parameters:
+        - name: service_name
+          in: path
+          description: Service name
+          required: true
+          type: string
+        - name: component_name
+          in: path
+          description: Component name
+          required: true
+          type: string
+        - name: Component
+          in: body
+          description: The definition of a component which contains the updated number of instances.
+          required: true
+          schema:
+            $ref: '#/definitions/Component'
+      responses:
+        200:
+          description: Flex was successful
+        404:
+          description: Service does not exist
+        default:
+          description: Unexpected error
+          schema:
+            $ref: '#/definitions/ServiceStatus'
+definitions:
+  Service:
+    description: a service resource has the following attributes.
+    required:
+      - name
+      - version
+    properties:
+      name:
+        type: string
+        description: A unique service name. If Registry DNS is enabled, the max length is 63 characters.
+      version:
+        type: string
+        description: Version of the service.
+      description:
+        type: string
+        description: Description of the service.
+      id:
+        type: string
+        description: A unique service id.
+      artifact:
+        description: The default artifact for all components of the service except the components which have Artifact type set to SERVICE (optional).
+        $ref: '#/definitions/Artifact'
+      resource:
+        description: The default resource for all components of the service (optional).
+        $ref: '#/definitions/Resource'
+      launch_time:
+        type: string
+        format: date
+        description: The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.
+      number_of_running_containers:
+        type: integer
+        format: int64
+        description: In a GET response this provides the total number of running containers for this service (across all components) at the time of the request. Note that a subsequent request can return a different number as more containers get allocated, until the total number of containers is reached or a flex request is made between the two requests.
+      lifetime:
+        type: integer
+        format: int64
+        description: Life time (in seconds) of the service from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.
+      components:
+        description: Components of a service.
+        type: array
+        items:
+          $ref: '#/definitions/Component'
+      configuration:
+        description: Config properties of a service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.
+        $ref: '#/definitions/Configuration'
+      state:
+        description: State of the service. Specifying a value for this attribute for the PUT payload means update the service to this desired state.
+        $ref: '#/definitions/ServiceState'
+      quicklinks:
+        type: object
+        description: A blob of key-value pairs of quicklinks to be exported for a service.
+        additionalProperties:
+          type: string
+      queue:
+        type: string
+        description: The YARN queue that this service should be submitted to.
+      kerberos_principal:
+        description: The principal info of the user who launches the service.
+        $ref: '#/definitions/KerberosPrincipal'
+      docker_client_config:
+        type: string
+        description: URI of the file containing the docker client configuration (e.g. hdfs:///tmp/config.json).
+  ResourceInformation:
+    description:
+      ResourceInformation determines unit/value of resource types in addition to memory and vcores. It will be part of Resource object.
+    properties:
+      value:
+        type: integer
+        format: int64
+        description: Integer value of the resource.
+      unit:
+        type: string
+        description: Unit of the resource, acceptable values are - p/n/u/m/k/M/G/T/P/Ki/Mi/Gi/Ti/Pi. By default it is empty, which means no unit.
+  Resource:
+    description:
+      Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpu & memory is expected; it raises a validation exception otherwise.
+    properties:
+      profile:
+        type: string
+        description: Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.
+      cpus:
+        type: integer
+        format: int32
+        description: Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).
+      memory:
+        type: string
+        description: Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.
+      additional:
+        type: object
+        additionalProperties:
+          $ref: '#/definitions/ResourceInformation'
+        description: A map of resource type name to resource type information. Including value (integer), and unit (string). This will be used to specify resource other than cpu and memory. Please refer to example below.
+  PlacementPolicy:
+    description: Advanced placement policy of the components of a service.
+    required:
+      - constraints
+    properties:
+      constraints:
+        description: Placement constraint details.
+        type: array
+        items:
+          $ref: '#/definitions/PlacementConstraint'
+  PlacementConstraint:
+    description: Placement constraint details.
+    required:
+      - type
+      - scope
+    properties:
+      name:
+        description: An optional name associated to this constraint.
+        type: string
+        example: C1
+      type:
+        description: The type of placement.
+        $ref: '#/definitions/PlacementType'
+      scope:
+        description: The scope of placement.
+        $ref: '#/definitions/PlacementScope'
+      target_tags:
+        description: The names of the components that this component's placement policy depends upon are added as target tags. For affinity, this means this component's containers request placement on hosts where containers of the target tag component(s) are running. Target tags can also contain the name of this component itself, in which case, for anti-affinity, no more than one container of this component can be placed on a host. Similarly, for cardinality, containers of this component request placement on hosts running at least minCardinality but no more than maxCardinality containers of the target tag component(s).
+        type: array
+        items:
+          type: string
+      node_attributes:
+        description: Node attributes are a set of key:value(s) pairs associated with nodes.
+        type: object
+        additionalProperties:
+          type: array
+          items:
+            type: string
+      node_partitions:
+        description: Node partitions where the containers of this component can run.
+        type: array
+        items:
+          type: string
+      min_cardinality:
+        type: integer
+        format: int64
+        description: When placement type is cardinality, the minimum number of containers of the depending component that a host should have, where containers of this component can be allocated on.
+        example: 2
+      max_cardinality:
+        type: integer
+        format: int64
+        description: When placement type is cardinality, the maximum number of containers of the depending component that a host should have, where containers of this component can be allocated on.
+        example: 3
+  PlacementType:
+    description: The type of placement - affinity/anti-affinity/affinity-with-cardinality with containers of another component or containers of the same component (self).
+    properties:
+      type:
+        type: string
+        enum:
+          - AFFINITY
+          - ANTI_AFFINITY
+          - AFFINITY_WITH_CARDINALITY
+  PlacementScope:
+    description: The scope of placement for the containers of a component.
+    properties:
+      type:
+        type: string
+        enum:
+          - NODE
+          - RACK
+  Artifact:
+    description: Artifact of a service component. If not specified, component will just run the bare launch command and no artifact will be localized.
+    required:
+    - id
+    properties:
+      id:
+        type: string
+        description: Artifact id. Examples are package location uri for tarball based services, image name for docker, name of service, etc.
+      type:
+        type: string
+        description: Artifact type, like docker, tarball, etc. (optional). For TARBALL type, the specified tarball will be localized to the container local working directory under a folder named lib. For SERVICE type, the service specified will be read and its components will be added into this service. The original component with artifact type SERVICE will be removed (any properties specified in the original component will be ignored).
+        enum:
+          - DOCKER
+          - TARBALL
+          - SERVICE
+        default: DOCKER
+      uri:
+        type: string
+        description: Artifact location to support multiple artifact stores (optional).
+  Component:
+    description: One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. Thereby it opens up the support for complex and nested services.
+    required:
+    - name
+    properties:
+      name:
+        type: string
+        description: Name of the service component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters.
+      state:
+        description: The state of the component
+        $ref: "#/definitions/ComponentState"
+      dependencies:
+        type: array
+        items:
+          type: string
+        description: An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG.
+      readiness_check:
+        description: Readiness check for this component.
+        $ref: '#/definitions/ReadinessCheck'
+      artifact:
+        description: Artifact of the component (optional). If not specified, the service level global artifact takes effect.
+        $ref: '#/definitions/Artifact'
+      launch_command:
+        type: string
+        description: The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any).
+      resource:
+        description: Resource of this component (optional). If not specified, the service level global resource takes effect.
+        $ref: '#/definitions/Resource'
+      number_of_containers:
+        type: integer
+        format: int64
+        description: Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.
+      containers:
+        type: array
+        description: Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started service.
+        items:
+          $ref: '#/definitions/Container'
+      run_privileged_container:
+        type: boolean
+        description: Run all containers of this component in privileged mode (YARN-4262).
+      placement_policy:
+        description: Advanced scheduling and placement policies for all containers of this component.
+        $ref: '#/definitions/PlacementPolicy'
+      configuration:
+        description: Config properties for this component.
+        $ref: '#/definitions/Configuration'
+      quicklinks:
+        type: array
+        items:
+          type: string
+        description: A list of quicklink keys defined at the service level, and to be resolved by this component.
+      restartPolicy:
+        type: string
+        description: Restart policy of the component. ALWAYS (always restart the component even if the instance exit code = 0); ON_FAILURE (restart the component only if the instance exit code != 0); NEVER (never restart the component)
+        enum:
+          - ALWAYS
+          - ON_FAILURE
+          - NEVER
+        default: ALWAYS
+  ReadinessCheck:
+    description: A check to be performed to determine the readiness of a component instance (a container). If no readiness check is specified, the default readiness check will be used unless the yarn.service.default-readiness-check.enabled configuration property is set to false at the component, service, or system level. The artifact field is currently unsupported but may be implemented in the future, enabling a pluggable helper container to support advanced use cases.
+    required:
+    - type
+    properties:
+      type:
+        type: string
+        description: DEFAULT (AM checks whether the container has an IP and optionally performs a DNS lookup for the container hostname), HTTP (AM performs default checks, plus sends a REST call to the container and expects a response code between 200 and 299), or PORT (AM performs default checks, plus attempts to open a socket connection to the container on a specified port).
+        enum:
+          - DEFAULT
+          - HTTP
+          - PORT
+      properties:
+        type: object
+        description: A blob of key value pairs that will be used to configure the check.
+        additionalProperties:
+          type: string
+      artifact:
+        description: Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond with a 204 No Content, just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note that only artifacts of type docker are supported for now. NOT IMPLEMENTED YET
+        $ref: '#/definitions/Artifact'
+  Configuration:
+    description: Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.
+    properties:
+      properties:
+        type: object
+        description: A blob of key-value pairs for configuring the YARN service AM.
+        additionalProperties:
+          type: string
+      env:
+        type: object
+        description: A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.
+        additionalProperties:
+          type: string
+      files:
+        description: Array of files that need to be created and made available as volumes in the service component containers.
+        type: array
+        items:
+          $ref: '#/definitions/ConfigFile'
+  ConfigFile:
+    description: A config file that needs to be created and made available as a volume in a service component container.
+    properties:
+      type:
+        type: string
+        description: Config file in the standard format like xml, properties, json, yaml, template.
+        enum:
+          - XML
+          - PROPERTIES
+          - JSON
+          - YAML
+          - TEMPLATE
+          - HADOOP_XML
+          - STATIC
+          - ARCHIVE
+      dest_file:
+        type: string
+        description: The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers.  If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.
+      src_file:
+        type: string
+        description: This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.
+      properties:
+        type: object
+        description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, the src_file content is dumped in the dest_file, and these properties will overwrite any existing properties in src_file or be added as new properties.
+        additionalProperties:
+          type: string
+  Container:
+    description: An instance of a running service container.
+    properties:
+      id:
+        type: string
+        description: Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.
+      launch_time:
+        type: string
+        format: date
+        description: The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.
+      ip:
+        type: string
+        description: IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
+      hostname:
+        type: string
+        description: Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
+      bare_host:
+        type: string
+        description: The bare node or host in which the container is running, e.g. cn008.example.com.
+      state:
+        description: State of the container of a service.
+        $ref: '#/definitions/ContainerState'
+      component_instance_name:
+        type: string
+        description: Name of the component instance that this container instance belongs to. Component instance name is named as $COMPONENT_NAME-i, where i is a
+                     monotonically increasing integer. E.g. a component called nginx can have multiple component instances named nginx-0, nginx-1 etc.
+                     Each component instance is backed by a container instance.
+      resource:
+        description: Resource used for this container.
+        $ref: '#/definitions/Resource'
+      artifact:
+        description: Artifact used for this container.
+        $ref: '#/definitions/Artifact'
+      privileged_container:
+        type: boolean
+        description: Container running in privileged mode or not.
+  ServiceState:
+    description: The current state of a service.
+    properties:
+      state:
+        type: string
+        description: enum of the state of the service
+        enum:
+          - ACCEPTED
+          - STARTED
+          - STABLE
+          - STOPPED
+          - FAILED
+          - FLEX
+          - UPGRADING
+  ContainerState:
+    description: The current state of the container of a service.
+    properties:
+      state:
+        type: string
+        description: enum of the state of the container
+        enum:
+          - INIT
+          - STARTED
+          - READY
+  ComponentState:
+    description: The state of the component
+    properties:
+      state:
+        type: string
+        description: enum of the state of the component
+        enum:
+          - INIT
+          - FLEXING
+          - STABLE
+          - UPGRADING
+  ServiceStatus:
+    description: The current status of a submitted service, returned as a response to the GET API.
+    properties:
+      diagnostics:
+        type: string
+        description: Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value, if the service is in a non-running state.
+      state:
+        description: Service state.
+        $ref: '#/definitions/ServiceState'
+      code:
+        type: integer
+        format: int32
+        description: An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.
+  KerberosPrincipal:
+    description: The kerberos principal info of the user who launches the service.
+    properties:
+      principal_name:
+        type: string
+        description: The principal name of the user who launches the service. Note that `_HOST` is required in the `principal_name` field such as `testuser/_HOST@EXAMPLE.COM` because Hadoop client validates that the server's (in this case, the AM's) principal has hostname present when communicating to the server.
+      keytab:
+        type: string
+        description: The URI of the kerberos keytab. Currently supports only files present on the bare host. URI starts with "file\://" - A path on the local host where the keytab is stored. It is assumed that admin pre-installs the keytabs on the local host before AM launches.
+
+
+
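
A hedged end-to-end sketch of the spec in use: POSTing a minimal service definition to the create endpoint. Only name and version are mandatory per the Service definition; the sleeper payload, host, and port below are illustrative.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    // Illustrative client sketch against POST /app/v1/services.
    public class CreateServiceSketch {
      public static void main(String[] args) throws Exception {
        String payload =
            "{"
            + "\"name\": \"sleeper-service\","
            + "\"version\": \"1.0.0\","
            + "\"components\": [{"
            +   "\"name\": \"sleeper\","
            +   "\"number_of_containers\": 1,"
            +   "\"launch_command\": \"sleep 900\","
            +   "\"resource\": {\"cpus\": 1, \"memory\": \"256\"}"
            + "}]"
            + "}";

        URL url = new URL("http://localhost:8088/app/v1/services");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
          os.write(payload.getBytes(StandardCharsets.UTF_8));
        }
        // Per the spec, 202 Accepted only confirms submission; poll
        // GET /app/v1/services/sleeper-service for the actual state.
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }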

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/log4j-server.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/log4j-server.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/log4j-server.properties
new file mode 100644
index 0000000..8c679b9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/log4j-server.properties
@@ -0,0 +1,76 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+# This is the log4j configuration for YARN Services REST API Server
+
+# Log rotation based on size (1GB) with a max of 10 backup files
+log4j.rootLogger=INFO, restservicelog
+log4j.threshold=ALL
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
+
+log4j.appender.restservicelog=org.apache.log4j.RollingFileAppender
+log4j.appender.restservicelog.layout=org.apache.log4j.PatternLayout
+log4j.appender.restservicelog.File=${REST_SERVICE_LOG_DIR}/restservice.log
+log4j.appender.restservicelog.MaxFileSize=1GB
+log4j.appender.restservicelog.MaxBackupIndex=10
+
+# log layout skips stack-trace creation operations by avoiding line numbers and method
+log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
+
+# debug edition is much more expensive
+#log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+# configure stderr
+# set the conversion pattern of stderr
+# Print the date in ISO 8601 format
+log4j.appender.stderr=org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.Target=System.err
+log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+
+# for debugging REST API Service
+#log4j.logger.org.apache.hadoop.yarn.services=DEBUG
+
+# uncomment to debug service lifecycle issues
+#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+
+# uncomment for YARN operations
+#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+
+# uncomment this to debug security problems
+#log4j.logger.org.apache.hadoop.security=DEBUG
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.hdfs=WARN
+log4j.logger.org.apache.hadoop.hdfs.shortcircuit=ERROR
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.curator.framework.state=ERROR
+log4j.logger.org.apache.curator.framework.imps=WARN
+
+log4j.logger.org.mortbay.log=DEBUG

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app
new file mode 100644
index 0000000..6a077b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DON'T DELETE. REST WEBAPP RUN SCRIPT WILL STOP WORKING.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
new file mode 100644
index 0000000..1282c9f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<web-app xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+        xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd"
+        version="3.0">
+
+    <servlet>
+        <servlet-name>Jersey REST API</servlet-name>
+        <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
+        <init-param>
+            <param-name>com.sun.jersey.config.property.packages</param-name>
+            <param-value>org.apache.hadoop.yarn.service.webapp,org.apache.hadoop.yarn.service.api,org.apache.hadoop.yarn.service.api.records</param-value>
+        </init-param>
+        <init-param>
+          <param-name>com.sun.jersey.api.json.POJOMappingFeature</param-name>
+          <param-value>true</param-value>
+        </init-param>
+        <load-on-startup>1</load-on-startup>
+    </servlet>
+    <servlet-mapping>
+        <servlet-name>Jersey REST API</servlet-name>
+        <url-pattern>/*</url-pattern>
+    </servlet-mapping>
+</web-app>
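
This web.xml wires the Jersey 1.x ServletContainer to every path under the
webapp and tells it to scan the three listed packages for JAX-RS annotated
resources; POJOMappingFeature enables automatic POJO-to-JSON mapping. A
minimal sketch of a resource such a package scan would pick up; PingResource
and its path are hypothetical, for illustration only (the real resources live
in org.apache.hadoop.yarn.service.webapp):

    package org.apache.hadoop.yarn.service.webapp;

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;

    @Path("/ping")
    public class PingResource {
      @GET
      @Produces(MediaType.APPLICATION_JSON)
      public String ping() {
        // With POJOMappingFeature on, returned POJOs would also map to JSON.
        return "{\"status\":\"ok\"}";
      }
    }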

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
new file mode 100644
index 0000000..75b9486
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.Container;
+import org.apache.hadoop.yarn.service.api.records.ContainerState;
+import org.apache.hadoop.yarn.service.api.records.Resource;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.client.ServiceClient;
+import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * A mock version of ServiceClient - this class is designed
+ * to simulate various error conditions that can occur
+ * when a consumer class calls ServiceClient.
+ */
+public class ServiceClientTest extends ServiceClient {
+
+  private Configuration conf = new Configuration();
+  private Service goodServiceStatus = buildLiveGoodService();
+  private boolean initialized;
+  private Set<String> expectedInstances = new HashSet<>();
+
+  public ServiceClientTest() {
+    super();
+  }
+
+  @Override
+  public void init(Configuration conf) {
+    if (!initialized) {
+      super.init(conf);
+      initialized = true;
+    }
+  }
+
+  @Override
+  public void stop() {
+    // This is needed for testing the API Server, which uses the client to
+    // get status and then perform an action.
+  }
+
+  public void forceStop() {
+    expectedInstances.clear();
+    super.stop();
+  }
+
+  @Override
+  public Configuration getConfig() {
+    return conf;
+  }
+
+  @Override
+  public ApplicationId actionCreate(Service service) throws IOException {
+    ServiceApiUtil.validateAndResolveService(service,
+        new SliderFileSystem(conf), getConfig());
+    return ApplicationId.newInstance(System.currentTimeMillis(), 1);
+  }
+
+  @Override
+  public Service getStatus(String appName) throws FileNotFoundException {
+    if ("jenkins".equals(appName)) {
+      return goodServiceStatus;
+    } else {
+      throw new FileNotFoundException("Service " + appName + " not found");
+    }
+  }
+
+  @Override
+  public int actionStart(String serviceName)
+      throws YarnException, IOException {
+    if (serviceName != null && serviceName.equals("jenkins")) {
+      return EXIT_SUCCESS;
+    } else {
+      throw new ApplicationNotFoundException("");
+    }
+  }
+
+  @Override
+  public int actionStop(String serviceName, boolean waitForAppStopped)
+      throws YarnException, IOException {
+    if (serviceName == null) {
+      throw new NullPointerException();
+    }
+    if (serviceName.equals("jenkins")) {
+      return EXIT_SUCCESS;
+    } else if (serviceName.equals("jenkins-second-stop")) {
+      return EXIT_COMMAND_ARGUMENT_ERROR;
+    } else {
+      throw new ApplicationNotFoundException("");
+    }
+  }
+
+  @Override
+  public int actionDestroy(String serviceName) {
+    if (serviceName != null) {
+      if (serviceName.equals("jenkins")) {
+        return EXIT_SUCCESS;
+      } else if (serviceName.equals("jenkins-already-stopped")) {
+        return EXIT_SUCCESS;
+      } else if (serviceName.equals("jenkins-doesn't-exist")) {
+        return EXIT_NOT_FOUND;
+      } else if (serviceName.equals("jenkins-error-cleaning-registry")) {
+        return EXIT_OTHER_FAILURE;
+      }
+    }
+    throw new IllegalArgumentException();
+  }
+
+  @Override
+  public int initiateUpgrade(Service service) throws YarnException,
+      IOException {
+    if (service.getName() != null && service.getName().equals("jenkins")) {
+      return EXIT_SUCCESS;
+    } else {
+      throw new IllegalArgumentException();
+    }
+  }
+
+  @Override
+  public int actionUpgrade(Service service, List<Container> compInstances)
+      throws IOException, YarnException {
+    if (service.getName() != null && service.getName().equals("jenkins")
+        && compInstances != null) {
+      Set<String> actualInstances = compInstances.stream().map(
+          Container::getComponentInstanceName).collect(Collectors.toSet());
+      if (actualInstances.equals(expectedInstances)) {
+        return EXIT_SUCCESS;
+      }
+    }
+    throw new IllegalArgumentException();
+  }
+
+  Service getGoodServiceStatus() {
+    return goodServiceStatus;
+  }
+
+  void setExpectedInstances(Set<String> instances) {
+    if (instances != null) {
+      expectedInstances.addAll(instances);
+    }
+  }
+
+  static Service buildGoodService() {
+    Service service = new Service();
+    service.setName("jenkins");
+    service.setVersion("v1");
+    Artifact artifact = new Artifact();
+    artifact.setType(Artifact.TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<>();
+    for (int i = 0; i < 2; i++) {
+      Component c = new Component();
+      c.setName("jenkins" + i);
+      c.setNumberOfContainers(2L);
+      c.setArtifact(artifact);
+      c.setLaunchCommand("");
+      c.setResource(resource);
+      components.add(c);
+    }
+    service.setComponents(components);
+    return service;
+  }
+
+  static Service buildLiveGoodService() {
+    Service service = buildGoodService();
+    Component comp = service.getComponents().iterator().next();
+    List<Container> containers = new ArrayList<>();
+    for (int i = 0; i < comp.getNumberOfContainers(); i++) {
+      Container container = new Container();
+      container.setComponentInstanceName(comp.getName() + "-" + (i + 1));
+      container.setState(ContainerState.READY);
+      containers.add(container);
+    }
+    comp.setContainers(containers);
+    return service;
+  }
+}
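
The mock above encodes its contract in the service name: "jenkins" succeeds,
"jenkins-second-stop" fails with a command-argument error, and unknown names
raise not-found errors. A minimal sketch of driving the mock directly,
assuming the test classpath; it mirrors how TestApiServer injects the mock
into ApiServer below:

    import org.apache.hadoop.yarn.service.ServiceClientTest;
    import org.apache.hadoop.yarn.service.api.records.Service;

    public class ServiceClientTestDemo {
      public static void main(String[] args) throws Exception {
        ServiceClientTest client = new ServiceClientTest();
        Service live = client.getStatus("jenkins");   // canned good status
        System.out.println(live.getName());           // prints "jenkins"
        int rc = client.actionStop("jenkins", false); // 0, i.e. EXIT_SUCCESS
        System.out.println(rc);
        client.forceStop();   // invokes the real stop() and clears state
      }
    }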

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
new file mode 100644
index 0000000..733b9bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
@@ -0,0 +1,623 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service;
+
+import static org.junit.Assert.*;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.Path;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import com.google.common.collect.Sets;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.ComponentState;
+import org.apache.hadoop.yarn.service.api.records.Container;
+import org.apache.hadoop.yarn.service.api.records.ContainerState;
+import org.apache.hadoop.yarn.service.api.records.Resource;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
+import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
+import org.apache.hadoop.yarn.service.conf.RestApiConstants;
+import org.apache.hadoop.yarn.service.webapp.ApiServer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Test case for ApiServer REST API.
+ *
+ */
+public class TestApiServer {
+  private ApiServer apiServer;
+  private HttpServletRequest request;
+  private ServiceClientTest mockServerClient;
+
+  @Before
+  public void setup() throws Exception {
+    request = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(request.getRemoteUser())
+        .thenReturn(System.getProperty("user.name"));
+    mockServerClient = new ServiceClientTest();
+    Configuration conf = new Configuration();
+    conf.set("yarn.api-service.service.client.class",
+        ServiceClientTest.class.getName());
+    apiServer = new ApiServer(conf);
+    apiServer.setServiceClient(mockServerClient);
+  }
+
+  @After
+  public void teardown() {
+    mockServerClient.forceStop();
+  }
+
+  @Test
+  public void testPathAnnotation() {
+    assertNotNull(this.apiServer.getClass().getAnnotation(Path.class));
+    assertTrue("The controller has the annotation Path",
+        this.apiServer.getClass().isAnnotationPresent(Path.class));
+    final Path path = this.apiServer.getClass()
+        .getAnnotation(Path.class);
+    assertEquals("The path has /v1 annotation", "/v1", path.value());
+  }
+
+  @Test
+  public void testGetVersion() {
+    final Response actual = apiServer.getVersion();
+    assertEquals("Version number is", Response.ok().build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testBadCreateService() {
+    Service service = new Service();
+    // Test for invalid argument
+    final Response actual = apiServer.createService(request, service);
+    assertEquals("Create service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testGoodCreateService() throws Exception {
+    String json = "{\"auths\": "
+        + "{\"https://index.docker.io/v1/\": "
+        + "{\"auth\": \"foobarbaz\"},"
+        + "\"registry.example.com\": "
+        + "{\"auth\": \"bazbarfoo\"}}}";
+    File dockerTmpDir = new File("target", "docker-tmp");
+    FileUtils.deleteQuietly(dockerTmpDir);
+    dockerTmpDir.mkdirs();
+    String dockerConfig = dockerTmpDir + "/config.json";
+    BufferedWriter bw = new BufferedWriter(new FileWriter(dockerConfig));
+    bw.write(json);
+    bw.close();
+    Service service = ServiceClientTest.buildGoodService();
+    final Response actual = apiServer.createService(request, service);
+    assertEquals("Create service is ",
+        Response.status(Status.ACCEPTED).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testInternalServerErrorDockerClientConfigMissingCreateService() {
+    Service service = new Service();
+    service.setName("jenkins");
+    service.setVersion("v1");
+    service.setDockerClientConfig("/does/not/exist/config.json");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<>();
+    Component c = new Component();
+    c.setName("jenkins");
+    c.setNumberOfContainers(1L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    final Response actual = apiServer.createService(request, service);
+    assertEquals("Create service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testBadGetService() {
+    final String serviceName = "nonexistent-jenkins";
+    final Response actual = apiServer.getService(request, serviceName);
+    assertEquals("Get service is ",
+        Response.status(Status.NOT_FOUND).build().getStatus(),
+        actual.getStatus());
+    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Response code don't match",
+        RestApiConstants.ERROR_CODE_APP_NAME_INVALID, serviceStatus.getCode());
+    assertEquals("Response diagnostics don't match",
+        "Service " + serviceName + " not found",
+        serviceStatus.getDiagnostics());
+  }
+
+  @Test
+  public void testBadGetService2() {
+    final Response actual = apiServer.getService(request, null);
+    assertEquals("Get service is ",
+        Response.status(Status.NOT_FOUND).build().getStatus(),
+        actual.getStatus());
+    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Response code don't match",
+        RestApiConstants.ERROR_CODE_APP_NAME_INVALID, serviceStatus.getCode());
+    assertEquals("Response diagnostics don't match",
+        "Service name cannot be null.", serviceStatus.getDiagnostics());
+  }
+
+  @Test
+  public void testGoodGetService() {
+    final Response actual = apiServer.getService(request, "jenkins");
+    assertEquals("Get service is ",
+        Response.status(Status.OK).build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testBadDeleteService() {
+    final Response actual = apiServer.deleteService(request, "no-jenkins");
+    assertEquals("Delete service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testBadDeleteService2() {
+    final Response actual = apiServer.deleteService(request, null);
+    assertEquals("Delete service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testBadDeleteService3() {
+    final Response actual = apiServer.deleteService(request,
+        "jenkins-doesn't-exist");
+    assertEquals("Delete service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testBadDeleteService4() {
+    final Response actual = apiServer.deleteService(request,
+        "jenkins-error-cleaning-registry");
+    assertEquals("Delete service is ",
+        Response.status(Status.INTERNAL_SERVER_ERROR).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testGoodDeleteService() {
+    final Response actual = apiServer.deleteService(request, "jenkins");
+    assertEquals("Delete service is ",
+        Response.status(Status.OK).build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testDeleteStoppedService() {
+    final Response actual = apiServer.deleteService(request,
+        "jenkins-already-stopped");
+    assertEquals("Delete service is ",
+        Response.status(Status.OK).build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testDecreaseContainerAndStop() {
+    Service service = new Service();
+    service.setState(ServiceState.STOPPED);
+    service.setName("jenkins");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<Component>();
+    Component c = new Component();
+    c.setName("jenkins");
+    c.setNumberOfContainers(0L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    final Response actual = apiServer.updateService(request, "jenkins",
+        service);
+    assertEquals("update service is ",
+        Response.status(Status.OK).build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testBadDecreaseContainerAndStop() {
+    Service service = new Service();
+    service.setState(ServiceState.STOPPED);
+    service.setName("no-jenkins");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<Component>();
+    Component c = new Component();
+    c.setName("no-jenkins");
+    c.setNumberOfContainers(-1L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    System.out.println("before stop");
+    final Response actual = apiServer.updateService(request, "no-jenkins",
+        service);
+    assertEquals("flex service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testIncreaseContainersAndStart() {
+    Service service = new Service();
+    service.setState(ServiceState.STARTED);
+    service.setName("jenkins");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<Component>();
+    Component c = new Component();
+    c.setName("jenkins");
+    c.setNumberOfContainers(2L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    final Response actual = apiServer.updateService(request, "jenkins",
+        service);
+    assertEquals("flex service is ",
+        Response.status(Status.OK).build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testBadStartServices() {
+    Service service = new Service();
+    service.setState(ServiceState.STARTED);
+    service.setName("no-jenkins");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<Component>();
+    Component c = new Component();
+    c.setName("jenkins");
+    c.setNumberOfContainers(2L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    final Response actual = apiServer.updateService(request, "no-jenkins",
+        service);
+    assertEquals("start service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testGoodStartServices() {
+    Service service = new Service();
+    service.setState(ServiceState.STARTED);
+    service.setName("jenkins");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<Component>();
+    Component c = new Component();
+    c.setName("jenkins");
+    c.setNumberOfContainers(2L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    final Response actual = apiServer.updateService(request, "jenkins",
+        service);
+    assertEquals("start service is ",
+        Response.status(Status.OK).build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testBadStopServices() {
+    Service service = new Service();
+    service.setState(ServiceState.STOPPED);
+    service.setName("no-jenkins");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<Component>();
+    Component c = new Component();
+    c.setName("no-jenkins");
+    c.setNumberOfContainers(-1L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    System.out.println("before stop");
+    final Response actual = apiServer.updateService(request, "no-jenkins",
+        service);
+    assertEquals("stop service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testGoodStopServices() {
+    Service service = new Service();
+    service.setState(ServiceState.STOPPED);
+    service.setName("jenkins");
+    System.out.println("before stop");
+    final Response actual = apiServer.updateService(request, "jenkins",
+        service);
+    assertEquals("stop service is ",
+        Response.status(Status.OK).build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testBadSecondStopServices() throws Exception {
+    Service service = new Service();
+    service.setState(ServiceState.STOPPED);
+    service.setName("jenkins-second-stop");
+    // simulates stop on an already stopped service
+    System.out.println("before second stop");
+    final Response actual = apiServer.updateService(request,
+        "jenkins-second-stop", service);
+    assertEquals("stop service should have thrown 400 Bad Request: ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Stop service should have failed with service already stopped",
+        "Service jenkins-second-stop is already stopped",
+        serviceStatus.getDiagnostics());
+  }
+
+  @Test
+  public void testUpdateService() {
+    Service service = new Service();
+    service.setState(ServiceState.STARTED);
+    service.setName("no-jenkins");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<Component>();
+    Component c = new Component();
+    c.setName("no-jenkins");
+    c.setNumberOfContainers(-1L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    System.out.println("before stop");
+    final Response actual = apiServer.updateService(request, "no-jenkins",
+        service);
+    assertEquals("update service is ",
+        Response.status(Status.BAD_REQUEST)
+            .build().getStatus(), actual.getStatus());
+  }
+
+  @Test
+  public void testUpdateComponent() {
+    Response actual = apiServer.updateComponent(request, "jenkins",
+        "jenkins-master", null);
+    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals("Update component should have failed with no data error",
+        "No component data provided", serviceStatus.getDiagnostics());
+
+    Component comp = new Component();
+    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
+        comp);
+    serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals("Update component should have failed with no count error",
+        "No container count provided", serviceStatus.getDiagnostics());
+
+    comp.setNumberOfContainers(-1L);
+    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
+        comp);
+    serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals("Update component should have failed with no count error",
+        "Invalid number of containers specified -1", serviceStatus.getDiagnostics());
+
+    comp.setName("jenkins-slave");
+    comp.setNumberOfContainers(1L);
+    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
+        comp);
+    serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals(
+        "Update component should have failed with component name mismatch "
+            + "error",
+        "Component name in the request object (jenkins-slave) does not match "
+            + "that in the URI path (jenkins-master)",
+        serviceStatus.getDiagnostics());
+  }
+
+  @Test
+  public void testInitiateUpgrade() {
+    Service goodService = ServiceClientTest.buildLiveGoodService();
+    goodService.setVersion("v2");
+    goodService.setState(ServiceState.UPGRADING);
+    final Response actual = apiServer.updateService(request,
+        goodService.getName(), goodService);
+    assertEquals("Initiate upgrade is ",
+        Response.status(Status.ACCEPTED).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testUpgradeSingleInstance() {
+    Service goodService = ServiceClientTest.buildLiveGoodService();
+    Component comp = goodService.getComponents().iterator().next();
+    Container container = comp.getContainers().iterator().next();
+    container.setState(ContainerState.UPGRADING);
+
+    // To be able to upgrade, the service needs to be in UPGRADING
+    // and container state needs to be in NEEDS_UPGRADE.
+    Service serviceStatus = mockServerClient.getGoodServiceStatus();
+    serviceStatus.setState(ServiceState.UPGRADING);
+    Container liveContainer = serviceStatus.getComponents().iterator().next()
+        .getContainers().iterator().next();
+    liveContainer.setState(ContainerState.NEEDS_UPGRADE);
+    mockServerClient.setExpectedInstances(Sets.newHashSet(
+        liveContainer.getComponentInstanceName()));
+
+    final Response actual = apiServer.updateComponentInstance(request,
+        goodService.getName(), comp.getName(),
+        container.getComponentInstanceName(), container);
+    assertEquals("Instance upgrade is ",
+        Response.status(Status.ACCEPTED).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testUpgradeMultipleInstances() {
+    Service goodService = ServiceClientTest.buildLiveGoodService();
+    Component comp = goodService.getComponents().iterator().next();
+    comp.getContainers().forEach(container ->
+        container.setState(ContainerState.UPGRADING));
+
+    // To be able to upgrade, the service needs to be in UPGRADING
+    // and container state needs to be in NEEDS_UPGRADE.
+    Service serviceStatus = mockServerClient.getGoodServiceStatus();
+    serviceStatus.setState(ServiceState.UPGRADING);
+    Set<String> expectedInstances = new HashSet<>();
+    serviceStatus.getComponents().iterator().next().getContainers().forEach(
+        container -> {
+          container.setState(ContainerState.NEEDS_UPGRADE);
+          expectedInstances.add(container.getComponentInstanceName());
+        }
+    );
+    mockServerClient.setExpectedInstances(expectedInstances);
+
+    final Response actual = apiServer.updateComponentInstances(request,
+        goodService.getName(), comp.getContainers());
+    assertEquals("Instance upgrade is ",
+        Response.status(Status.ACCEPTED).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testUpgradeComponent() {
+    Service goodService = ServiceClientTest.buildLiveGoodService();
+    Component comp = goodService.getComponents().iterator().next();
+    comp.setState(ComponentState.UPGRADING);
+
+    // To be able to upgrade, the service needs to be in UPGRADING
+    // and component state needs to be in NEEDS_UPGRADE.
+    Service serviceStatus = mockServerClient.getGoodServiceStatus();
+    serviceStatus.setState(ServiceState.UPGRADING);
+    Component liveComp = serviceStatus.getComponent(comp.getName());
+    liveComp.setState(ComponentState.NEEDS_UPGRADE);
+    Set<String> expectedInstances = new HashSet<>();
+    liveComp.getContainers().forEach(container -> {
+      expectedInstances.add(container.getComponentInstanceName());
+      container.setState(ContainerState.NEEDS_UPGRADE);
+    });
+    mockServerClient.setExpectedInstances(expectedInstances);
+
+    final Response actual = apiServer.updateComponent(request,
+        goodService.getName(), comp.getName(), comp);
+    assertEquals("Component upgrade is ",
+        Response.status(Status.ACCEPTED).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
+  public void testUpgradeMultipleComps() {
+    Service goodService = ServiceClientTest.buildLiveGoodService();
+    goodService.getComponents().forEach(comp ->
+        comp.setState(ComponentState.UPGRADING));
+
+    // To be able to upgrade, the live service needs to be in UPGRADING
+    // and component states needs to be in NEEDS_UPGRADE.
+    Service serviceStatus = mockServerClient.getGoodServiceStatus();
+    serviceStatus.setState(ServiceState.UPGRADING);
+    Set<String> expectedInstances = new HashSet<>();
+    serviceStatus.getComponents().forEach(liveComp -> {
+      liveComp.setState(ComponentState.NEEDS_UPGRADE);
+      liveComp.getContainers().forEach(liveContainer -> {
+        expectedInstances.add(liveContainer.getComponentInstanceName());
+        liveContainer.setState(ContainerState.NEEDS_UPGRADE);
+      });
+    });
+    mockServerClient.setExpectedInstances(expectedInstances);
+
+    final Response actual = apiServer.updateComponents(request,
+        goodService.getName(), goodService.getComponents());
+    assertEquals("Component upgrade is ",
+        Response.status(Status.ACCEPTED).build().getStatus(),
+        actual.getStatus());
+  }
+}
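
Most tests above rebuild the same Service payload inline before calling
updateService and asserting on the JAX-RS Response status. A hedged sketch of
a helper that would factor out that repetition, using the imports already
present in the test; the helper is ours, not part of the patch:

    // Hypothetical helper (not in the patch): builds the Service payload the
    // flex/start/stop tests above construct inline.
    private static Service buildService(String name, ServiceState state,
        long containers) {
      Service service = new Service();
      service.setState(state);
      service.setName(name);
      Artifact artifact = new Artifact();
      artifact.setType(TypeEnum.DOCKER);
      artifact.setId("jenkins:latest");
      Resource resource = new Resource();
      resource.setCpus(1);
      resource.setMemory("2048");
      Component c = new Component();
      c.setName(name);
      c.setNumberOfContainers(containers);
      c.setArtifact(artifact);
      c.setLaunchCommand("");
      c.setResource(resource);
      List<Component> components = new ArrayList<>();
      components.add(c);
      service.setComponents(components);
      return service;
    }

For example, buildService("jenkins", ServiceState.STARTED, 2L) would
reproduce the payload built in testGoodStartServices.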

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
new file mode 100644
index 0000000..6cf0880
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.client;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.*;
+
+/**
+ * Test case for CLI to API Service.
+ *
+ */
+public class TestApiServiceClient {
+  private static ApiServiceClient asc;
+  private static ApiServiceClient badAsc;
+  private static Server server;
+
+  /**
+   * A mocked version of API Service for testing purposes.
+   *
+   */
+  @SuppressWarnings("serial")
+  public static class TestServlet extends HttpServlet {
+
+    @Override
+    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      System.out.println("Get was called");
+      if (req.getPathInfo() != null
+          && req.getPathInfo().contains("nonexistent-app")) {
+        resp.setStatus(HttpServletResponse.SC_NOT_FOUND);
+      } else {
+        resp.setStatus(HttpServletResponse.SC_OK);
+      }
+    }
+
+    @Override
+    protected void doPost(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      resp.setStatus(HttpServletResponse.SC_OK);
+    }
+
+    @Override
+    protected void doPut(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      resp.setStatus(HttpServletResponse.SC_OK);
+    }
+
+    @Override
+    protected void doDelete(HttpServletRequest req, HttpServletResponse resp)
+        throws ServletException, IOException {
+      resp.setStatus(HttpServletResponse.SC_OK);
+    }
+
+  }
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    server = new Server(8088);
+    ((QueuedThreadPool)server.getThreadPool()).setMaxThreads(10);
+    ServletContextHandler context = new ServletContextHandler();
+    context.setContextPath("/app");
+    server.setHandler(context);
+    context.addServlet(new ServletHolder(TestServlet.class), "/*");
+    ((ServerConnector)server.getConnectors()[0]).setHost("localhost");
+    server.start();
+
+    Configuration conf = new Configuration();
+    conf.set("yarn.resourcemanager.webapp.address",
+        "localhost:8088");
+    asc = new ApiServiceClient();
+    asc.serviceInit(conf);
+
+    Configuration conf2 = new Configuration();
+    conf2.set("yarn.resourcemanager.webapp.address",
+        "localhost:8089");
+    badAsc = new ApiServiceClient();
+    badAsc.serviceInit(conf2);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    server.stop();
+  }
+
+  @Test
+  public void testLaunch() {
+    String fileName = "target/test-classes/example-app.json";
+    String appName = "example-app";
+    long lifetime = 3600L;
+    String queue = "default";
+    try {
+      int result = asc.actionLaunch(fileName, appName, lifetime, queue);
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testBadLaunch() {
+    String fileName = "unknown_file";
+    String appName = "unknown_app";
+    long lifetime = 3600L;
+    String queue = "default";
+    try {
+      int result = badAsc.actionLaunch(fileName, appName, lifetime, queue);
+      assertEquals(EXIT_EXCEPTION_THROWN, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testStatus() {
+    String appName = "nonexistent-app";
+    try {
+      String result = asc.getStatusString(appName);
+      assertEquals("Status reponse don't match",
+          " Service " + appName + " not found", result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testStop() {
+    String appName = "example-app";
+    try {
+      int result = asc.actionStop(appName);
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testBadStop() {
+    String appName = "unknown_app";
+    try {
+      int result = badAsc.actionStop(appName);
+      assertEquals(EXIT_EXCEPTION_THROWN, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testStart() {
+    String appName = "example-app";
+    try {
+      int result = asc.actionStart(appName);
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testBadStart() {
+    String appName = "unknown_app";
+    try {
+      int result = badAsc.actionStart(appName);
+      assertEquals(EXIT_EXCEPTION_THROWN, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testSave() {
+    String fileName = "target/test-classes/example-app.json";
+    String appName = "example-app";
+    long lifetime = 3600L;
+    String queue = "default";
+    try {
+      int result = asc.actionSave(fileName, appName, lifetime, queue);
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testBadSave() {
+    String fileName = "unknown_file";
+    String appName = "unknown_app";
+    long lifetime = 3600L;
+    String queue = "default";
+    try {
+      int result = badAsc.actionSave(fileName, appName, lifetime, queue);
+      assertEquals(EXIT_EXCEPTION_THROWN, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testFlex() {
+    String appName = "example-app";
+    HashMap<String, String> componentCounts = new HashMap<String, String>();
+    try {
+      int result = asc.actionFlex(appName, componentCounts);
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testBadFlex() {
+    String appName = "unknown_app";
+    HashMap<String, String> componentCounts = new HashMap<String, String>();
+    try {
+      int result = badAsc.actionFlex(appName, componentCounts);
+      assertEquals(EXIT_EXCEPTION_THROWN, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testDestroy() {
+    String appName = "example-app";
+    try {
+      int result = asc.actionDestroy(appName);
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testBadDestroy() {
+    String appName = "unknown_app";
+    try {
+      int result = badAsc.actionDestroy(appName);
+      assertEquals(EXIT_EXCEPTION_THROWN, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testInitiateServiceUpgrade() {
+    String appName = "example-app";
+    String upgradeFileName = "target/test-classes/example-app.json";
+    try {
+      int result = asc.initiateUpgrade(appName, upgradeFileName, false);
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testInstancesUpgrade() {
+    String appName = "example-app";
+    try {
+      int result = asc.actionUpgradeInstances(appName, Lists.newArrayList(
+          "comp-1", "comp-2"));
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testComponentsUpgrade() {
+    String appName = "example-app";
+    try {
+      int result = asc.actionUpgradeComponents(appName, Lists.newArrayList(
+          "comp"));
+      assertEquals(EXIT_SUCCESS, result);
+    } catch (IOException | YarnException e) {
+      fail();
+    }
+  }
+
+
+}
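
The test stands up an embedded Jetty on port 8088 so ApiServiceClient's HTTP
calls hit TestServlet, while badAsc points at the unused port 8089 so its
calls fail with EXIT_EXCEPTION_THROWN. A minimal sketch, using only the JDK,
of what the mocked endpoint returns while setup()'s server is running; the
URL path is illustrative:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TestServletProbe {
      public static void main(String[] args) throws Exception {
        // TestServlet.doGet returns 404 for any path containing
        // "nonexistent-app" and 200 otherwise.
        URL url = new URL(
            "http://localhost:8088/app/v1/services/nonexistent-app");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        System.out.println(conn.getResponseCode()); // prints 404
        conn.disconnect();
      }
    }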

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
new file mode 100644
index 0000000..d39083d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Test class for system service manager.
+ */
+public class TestSystemServiceManagerImpl {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSystemServiceManagerImpl.class);
+  private SystemServiceManagerImpl systemService;
+  private Configuration conf;
+  private String resourcePath = "system-services";
+
+  private String[] users = new String[] {"user1", "user2"};
+  private static Map<String, Set<String>> loadedServices = new HashMap<>();
+  private static Map<String, Set<String>> submittedServices = new HashMap<>();
+
+  @Before
+  public void setup() {
+    File file = new File(
+        getClass().getClassLoader().getResource(resourcePath).getFile());
+    conf = new Configuration();
+    conf.set(YarnServiceConf.YARN_SERVICES_SYSTEM_SERVICE_DIRECTORY,
+        file.getAbsolutePath());
+    systemService = new SystemServiceManagerImpl() {
+      @Override ServiceClient getServiceClient() {
+        return new TestServiceClient();
+      }
+    };
+    systemService.init(conf); // do not call explicit start
+
+    constructUserService(users[0], "example-app1");
+    constructUserService(users[1], "example-app1", "example-app2");
+  }
+
+  @After
+  public void tearDown() {
+    systemService.stop();
+  }
+
+  @Test
+  public void testSystemServiceSubmission() throws Exception {
+    systemService.start();
+
+    /* verify the ignored services count */
+    Map<String, Integer> ignoredUserServices =
+        systemService.getIgnoredUserServices();
+    Assert.assertEquals(1, ignoredUserServices.size());
+    Assert.assertTrue("User user1 doesn't exist.",
+        ignoredUserServices.containsKey(users[0]));
+    int count = ignoredUserServices.get(users[0]);
+    Assert.assertEquals(1, count);
+    Assert.assertEquals(1,
+        systemService.getBadFileNameExtensionSkipCounter());
+    Assert.assertEquals(1, systemService.getBadDirSkipCounter());
+
+    Map<String, Set<Service>> userServices =
+        systemService.getSyncUserServices();
+    Assert.assertEquals(loadedServices.size(), userServices.size());
+    verifyForScannedUserServices(userServices);
+
+    verifyForLaunchedUserServices();
+
+    // launch the services a 2nd time to cover the service-already-exists case
+    systemService.launchUserService(userServices);
+    verifyForLaunchedUserServices();
+  }
+
+  private void verifyForScannedUserServices(
+      Map<String, Set<Service>> userServices) {
+    for (String user : users) {
+      Set<Service> services = userServices.get(user);
+      Set<String> serviceNames = loadedServices.get(user);
+      Assert.assertEquals(serviceNames.size(), services.size());
+      Iterator<Service> iterator = services.iterator();
+      while (iterator.hasNext()) {
+        Service next = iterator.next();
+        Assert.assertTrue(
+            "Service name doesn't exist in expected userService "
+                + serviceNames, serviceNames.contains(next.getName()));
+      }
+    }
+  }
+
+  public void constructUserService(String user, String... serviceNames) {
+    Set<String> service = loadedServices.get(user);
+    if (service == null) {
+      service = new HashSet<>();
+      for (String serviceName : serviceNames) {
+        service.add(serviceName);
+      }
+      loadedServices.put(user, service);
+    }
+  }
+
+  class TestServiceClient extends ServiceClient {
+    @Override
+    protected void serviceStart() throws Exception {
+      // do nothing
+    }
+
+    @Override
+    protected void serviceStop() throws Exception {
+      // do nothing
+    }
+
+    @Override
+    protected void serviceInit(Configuration configuration)
+        throws Exception {
+      // do nothing
+    }
+
+    @Override
+    public ApplicationId actionCreate(Service service)
+        throws YarnException, IOException {
+      String userName =
+          UserGroupInformation.getCurrentUser().getShortUserName();
+      Set<String> services = submittedServices.get(userName);
+      if (services == null) {
+        services = new HashSet<>();
+        submittedServices.put(userName, services);
+      }
+      if (services.contains(service.getName())) {
+        String message = "Failed to create service " + service.getName()
+            + ", because it already exists.";
+        throw new YarnException(message);
+      }
+      services.add(service.getName());
+      return ApplicationId.newInstance(System.currentTimeMillis(), 1);
+    }
+  }
+
+  private void verifyForLaunchedUserServices() {
+    Assert.assertEquals(loadedServices.size(), submittedServices.size());
+    for (Map.Entry<String, Set<String>> entry : submittedServices.entrySet()) {
+      String user = entry.getKey();
+      Set<String> serviceSet = entry.getValue();
+      Assert.assertTrue(loadedServices.containsKey(user));
+      Set<String> services = loadedServices.get(user);
+      Assert.assertEquals(services.size(), serviceSet.size());
+      Assert.assertTrue(services.containsAll(serviceSet));
+    }
+  }
+}
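
SystemServiceManagerImpl scans a configured directory of per-user service
definitions on start() and submits each one through ServiceClient, which is
why the test overrides getServiceClient(). A minimal sketch of standalone
usage, assuming the import paths from the test above; the directory path is
illustrative and its internal layout is not shown in this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.service.client.SystemServiceManagerImpl;
    import org.apache.hadoop.yarn.service.conf.YarnServiceConf;

    public class SystemServicesDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Illustrative path; point it at a tree of per-user service
        // definition files.
        conf.set(YarnServiceConf.YARN_SERVICES_SYSTEM_SERVICE_DIRECTORY,
            "/etc/hadoop/system-services");
        SystemServiceManagerImpl manager = new SystemServiceManagerImpl();
        manager.init(conf);   // scan the configured directory
        manager.start();      // submit each scanned service via ServiceClient
        manager.stop();
      }
    }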




[26/50] [abbrv] hadoop git commit: YARN-7530. Refactored YARN service API project location. Contributed by Chandni Singh

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
deleted file mode 100644
index 733b9bc..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
+++ /dev/null
@@ -1,623 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service;
-
-import static org.junit.Assert.*;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileWriter;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.Path;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-
-import com.google.common.collect.Sets;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.service.api.records.Artifact;
-import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum;
-import org.apache.hadoop.yarn.service.api.records.Component;
-import org.apache.hadoop.yarn.service.api.records.ComponentState;
-import org.apache.hadoop.yarn.service.api.records.Container;
-import org.apache.hadoop.yarn.service.api.records.ContainerState;
-import org.apache.hadoop.yarn.service.api.records.Resource;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.api.records.ServiceState;
-import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
-import org.apache.hadoop.yarn.service.conf.RestApiConstants;
-import org.apache.hadoop.yarn.service.webapp.ApiServer;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test cases for the ApiServer REST API.
- *
- */
-public class TestApiServer {
-  private ApiServer apiServer;
-  private HttpServletRequest request;
-  private ServiceClientTest mockServerClient;
-
-  @Before
-  public void setup() throws Exception {
-    request = Mockito.mock(HttpServletRequest.class);
-    Mockito.when(request.getRemoteUser())
-        .thenReturn(System.getProperty("user.name"));
-    mockServerClient = new ServiceClientTest();
-    Configuration conf = new Configuration();
-    conf.set("yarn.api-service.service.client.class",
-        ServiceClientTest.class.getName());
-    apiServer = new ApiServer(conf);
-    apiServer.setServiceClient(mockServerClient);
-  }
-
-  @After
-  public void teardown() {
-    mockServerClient.forceStop();
-  }
-
-  @Test
-  public void testPathAnnotation() {
-    assertNotNull(this.apiServer.getClass().getAnnotation(Path.class));
-    assertTrue("The controller has the annotation Path",
-        this.apiServer.getClass().isAnnotationPresent(Path.class));
-    final Path path = this.apiServer.getClass()
-        .getAnnotation(Path.class);
-    assertEquals("The path has /v1 annotation", "/v1", path.value());
-  }
-
-  @Test
-  public void testGetVersion() {
-    final Response actual = apiServer.getVersion();
-    assertEquals("Version number is", Response.ok().build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testBadCreateService() {
-    Service service = new Service();
-    // Test for invalid argument
-    final Response actual = apiServer.createService(request, service);
-    assertEquals("Create service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testGoodCreateService() throws Exception {
-    String json = "{\"auths\": "
-        + "{\"https://index.docker.io/v1/\": "
-        + "{\"auth\": \"foobarbaz\"},"
-        + "\"registry.example.com\": "
-        + "{\"auth\": \"bazbarfoo\"}}}";
-    File dockerTmpDir = new File("target", "docker-tmp");
-    FileUtils.deleteQuietly(dockerTmpDir);
-    dockerTmpDir.mkdirs();
-    String dockerConfig = dockerTmpDir + "/config.json";
-    BufferedWriter bw = new BufferedWriter(new FileWriter(dockerConfig));
-    bw.write(json);
-    bw.close();
-    Service service = ServiceClientTest.buildGoodService();
-    final Response actual = apiServer.createService(request, service);
-    assertEquals("Create service is ",
-        Response.status(Status.ACCEPTED).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testInternalServerErrorDockerClientConfigMissingCreateService() {
-    Service service = new Service();
-    service.setName("jenkins");
-    service.setVersion("v1");
-    service.setDockerClientConfig("/does/not/exist/config.json");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<>();
-    Component c = new Component();
-    c.setName("jenkins");
-    c.setNumberOfContainers(1L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    final Response actual = apiServer.createService(request, service);
-    assertEquals("Create service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testBadGetService() {
-    final String serviceName = "nonexistent-jenkins";
-    final Response actual = apiServer.getService(request, serviceName);
-    assertEquals("Get service is ",
-        Response.status(Status.NOT_FOUND).build().getStatus(),
-        actual.getStatus());
-    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
-    assertEquals("Response code don't match",
-        RestApiConstants.ERROR_CODE_APP_NAME_INVALID, serviceStatus.getCode());
-    assertEquals("Response diagnostics don't match",
-        "Service " + serviceName + " not found",
-        serviceStatus.getDiagnostics());
-  }
-
-  @Test
-  public void testBadGetService2() {
-    final Response actual = apiServer.getService(request, null);
-    assertEquals("Get service is ",
-        Response.status(Status.NOT_FOUND).build().getStatus(),
-        actual.getStatus());
-    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
-    assertEquals("Response code don't match",
-        RestApiConstants.ERROR_CODE_APP_NAME_INVALID, serviceStatus.getCode());
-    assertEquals("Response diagnostics don't match",
-        "Service name cannot be null.", serviceStatus.getDiagnostics());
-  }
-
-  @Test
-  public void testGoodGetService() {
-    final Response actual = apiServer.getService(request, "jenkins");
-    assertEquals("Get service is ",
-        Response.status(Status.OK).build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testBadDeleteService() {
-    final Response actual = apiServer.deleteService(request, "no-jenkins");
-    assertEquals("Delete service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testBadDeleteService2() {
-    final Response actual = apiServer.deleteService(request, null);
-    assertEquals("Delete service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testBadDeleteService3() {
-    final Response actual = apiServer.deleteService(request,
-        "jenkins-doesn't-exist");
-    assertEquals("Delete service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testBadDeleteService4() {
-    final Response actual = apiServer.deleteService(request,
-        "jenkins-error-cleaning-registry");
-    assertEquals("Delete service is ",
-        Response.status(Status.INTERNAL_SERVER_ERROR).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testGoodDeleteService() {
-    final Response actual = apiServer.deleteService(request, "jenkins");
-    assertEquals("Delete service is ",
-        Response.status(Status.OK).build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testDeleteStoppedService() {
-    final Response actual = apiServer.deleteService(request,
-        "jenkins-already-stopped");
-    assertEquals("Delete service is ",
-        Response.status(Status.OK).build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testDecreaseContainerAndStop() {
-    Service service = new Service();
-    service.setState(ServiceState.STOPPED);
-    service.setName("jenkins");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<Component>();
-    Component c = new Component();
-    c.setName("jenkins");
-    c.setNumberOfContainers(0L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    final Response actual = apiServer.updateService(request, "jenkins",
-        service);
-    assertEquals("update service is ",
-        Response.status(Status.OK).build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testBadDecreaseContainerAndStop() {
-    Service service = new Service();
-    service.setState(ServiceState.STOPPED);
-    service.setName("no-jenkins");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<Component>();
-    Component c = new Component();
-    c.setName("no-jenkins");
-    c.setNumberOfContainers(-1L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    System.out.println("before stop");
-    final Response actual = apiServer.updateService(request, "no-jenkins",
-        service);
-    assertEquals("flex service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testIncreaseContainersAndStart() {
-    Service service = new Service();
-    service.setState(ServiceState.STARTED);
-    service.setName("jenkins");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<Component>();
-    Component c = new Component();
-    c.setName("jenkins");
-    c.setNumberOfContainers(2L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    final Response actual = apiServer.updateService(request, "jenkins",
-        service);
-    assertEquals("flex service is ",
-        Response.status(Status.OK).build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testBadStartServices() {
-    Service service = new Service();
-    service.setState(ServiceState.STARTED);
-    service.setName("no-jenkins");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<Component>();
-    Component c = new Component();
-    c.setName("jenkins");
-    c.setNumberOfContainers(2L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    final Response actual = apiServer.updateService(request, "no-jenkins",
-        service);
-    assertEquals("start service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testGoodStartServices() {
-    Service service = new Service();
-    service.setState(ServiceState.STARTED);
-    service.setName("jenkins");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<Component>();
-    Component c = new Component();
-    c.setName("jenkins");
-    c.setNumberOfContainers(2L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    final Response actual = apiServer.updateService(request, "jenkins",
-        service);
-    assertEquals("start service is ",
-        Response.status(Status.OK).build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testBadStopServices() {
-    Service service = new Service();
-    service.setState(ServiceState.STOPPED);
-    service.setName("no-jenkins");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<Component>();
-    Component c = new Component();
-    c.setName("no-jenkins");
-    c.setNumberOfContainers(-1L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    System.out.println("before stop");
-    final Response actual = apiServer.updateService(request, "no-jenkins",
-        service);
-    assertEquals("stop service is ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testGoodStopServices() {
-    Service service = new Service();
-    service.setState(ServiceState.STOPPED);
-    service.setName("jenkins");
-    System.out.println("before stop");
-    final Response actual = apiServer.updateService(request, "jenkins",
-        service);
-    assertEquals("stop service is ",
-        Response.status(Status.OK).build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testBadSecondStopServices() throws Exception {
-    Service service = new Service();
-    service.setState(ServiceState.STOPPED);
-    service.setName("jenkins-second-stop");
-    // simulates stop on an already stopped service
-    System.out.println("before second stop");
-    final Response actual = apiServer.updateService(request,
-        "jenkins-second-stop", service);
-    assertEquals("stop service should have thrown 400 Bad Request: ",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
-    assertEquals("Stop service should have failed with service already stopped",
-        "Service jenkins-second-stop is already stopped",
-        serviceStatus.getDiagnostics());
-  }
-
-  @Test
-  public void testUpdateService() {
-    Service service = new Service();
-    service.setState(ServiceState.STARTED);
-    service.setName("no-jenkins");
-    Artifact artifact = new Artifact();
-    artifact.setType(TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<Component>();
-    Component c = new Component();
-    c.setName("no-jenkins");
-    c.setNumberOfContainers(-1L);
-    c.setArtifact(artifact);
-    c.setLaunchCommand("");
-    c.setResource(resource);
-    components.add(c);
-    service.setComponents(components);
-    System.out.println("before stop");
-    final Response actual = apiServer.updateService(request, "no-jenkins",
-        service);
-    assertEquals("update service is ",
-        Response.status(Status.BAD_REQUEST)
-            .build().getStatus(), actual.getStatus());
-  }
-
-  @Test
-  public void testUpdateComponent() {
-    Response actual = apiServer.updateComponent(request, "jenkins",
-        "jenkins-master", null);
-    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
-    assertEquals("Update component should have failed with 400 bad request",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-    assertEquals("Update component should have failed with no data error",
-        "No component data provided", serviceStatus.getDiagnostics());
-
-    Component comp = new Component();
-    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
-        comp);
-    serviceStatus = (ServiceStatus) actual.getEntity();
-    assertEquals("Update component should have failed with 400 bad request",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-    assertEquals("Update component should have failed with no count error",
-        "No container count provided", serviceStatus.getDiagnostics());
-
-    comp.setNumberOfContainers(-1L);
-    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
-        comp);
-    serviceStatus = (ServiceStatus) actual.getEntity();
-    assertEquals("Update component should have failed with 400 bad request",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-    assertEquals("Update component should have failed with no count error",
-        "Invalid number of containers specified -1", serviceStatus.getDiagnostics());
-
-    comp.setName("jenkins-slave");
-    comp.setNumberOfContainers(1L);
-    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
-        comp);
-    serviceStatus = (ServiceStatus) actual.getEntity();
-    assertEquals("Update component should have failed with 400 bad request",
-        Response.status(Status.BAD_REQUEST).build().getStatus(),
-        actual.getStatus());
-    assertEquals(
-        "Update component should have failed with component name mismatch "
-            + "error",
-        "Component name in the request object (jenkins-slave) does not match "
-            + "that in the URI path (jenkins-master)",
-        serviceStatus.getDiagnostics());
-  }
-
-  @Test
-  public void testInitiateUpgrade() {
-    Service goodService = ServiceClientTest.buildLiveGoodService();
-    goodService.setVersion("v2");
-    goodService.setState(ServiceState.UPGRADING);
-    final Response actual = apiServer.updateService(request,
-        goodService.getName(), goodService);
-    assertEquals("Initiate upgrade is ",
-        Response.status(Status.ACCEPTED).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testUpgradeSingleInstance() {
-    Service goodService = ServiceClientTest.buildLiveGoodService();
-    Component comp = goodService.getComponents().iterator().next();
-    Container container = comp.getContainers().iterator().next();
-    container.setState(ContainerState.UPGRADING);
-
-    // To be able to upgrade, the service needs to be in UPGRADING
-    // and container state needs to be in NEEDS_UPGRADE.
-    Service serviceStatus = mockServerClient.getGoodServiceStatus();
-    serviceStatus.setState(ServiceState.UPGRADING);
-    Container liveContainer = serviceStatus.getComponents().iterator().next()
-        .getContainers().iterator().next();
-    liveContainer.setState(ContainerState.NEEDS_UPGRADE);
-    mockServerClient.setExpectedInstances(Sets.newHashSet(
-        liveContainer.getComponentInstanceName()));
-
-    final Response actual = apiServer.updateComponentInstance(request,
-        goodService.getName(), comp.getName(),
-        container.getComponentInstanceName(), container);
-    assertEquals("Instance upgrade is ",
-        Response.status(Status.ACCEPTED).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testUpgradeMultipleInstances() {
-    Service goodService = ServiceClientTest.buildLiveGoodService();
-    Component comp = goodService.getComponents().iterator().next();
-    comp.getContainers().forEach(container ->
-        container.setState(ContainerState.UPGRADING));
-
-    // To be able to upgrade, the service needs to be in UPGRADING
-    // and container state needs to be in NEEDS_UPGRADE.
-    Service serviceStatus = mockServerClient.getGoodServiceStatus();
-    serviceStatus.setState(ServiceState.UPGRADING);
-    Set<String> expectedInstances = new HashSet<>();
-    serviceStatus.getComponents().iterator().next().getContainers().forEach(
-        container -> {
-          container.setState(ContainerState.NEEDS_UPGRADE);
-          expectedInstances.add(container.getComponentInstanceName());
-        }
-    );
-    mockServerClient.setExpectedInstances(expectedInstances);
-
-    final Response actual = apiServer.updateComponentInstances(request,
-        goodService.getName(), comp.getContainers());
-    assertEquals("Instance upgrade is ",
-        Response.status(Status.ACCEPTED).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testUpgradeComponent() {
-    Service goodService = ServiceClientTest.buildLiveGoodService();
-    Component comp = goodService.getComponents().iterator().next();
-    comp.setState(ComponentState.UPGRADING);
-
-    // To be able to upgrade, the service needs to be in UPGRADING
-    // and component state needs to be in NEEDS_UPGRADE.
-    Service serviceStatus = mockServerClient.getGoodServiceStatus();
-    serviceStatus.setState(ServiceState.UPGRADING);
-    Component liveComp = serviceStatus.getComponent(comp.getName());
-    liveComp.setState(ComponentState.NEEDS_UPGRADE);
-    Set<String> expectedInstances = new HashSet<>();
-    liveComp.getContainers().forEach(container -> {
-      expectedInstances.add(container.getComponentInstanceName());
-      container.setState(ContainerState.NEEDS_UPGRADE);
-    });
-    mockServerClient.setExpectedInstances(expectedInstances);
-
-    final Response actual = apiServer.updateComponent(request,
-        goodService.getName(), comp.getName(), comp);
-    assertEquals("Component upgrade is ",
-        Response.status(Status.ACCEPTED).build().getStatus(),
-        actual.getStatus());
-  }
-
-  @Test
-  public void testUpgradeMultipleComps() {
-    Service goodService = ServiceClientTest.buildLiveGoodService();
-    goodService.getComponents().forEach(comp ->
-        comp.setState(ComponentState.UPGRADING));
-
-    // To be able to upgrade, the live service needs to be in UPGRADING
-    // and component states needs to be in NEEDS_UPGRADE.
-    Service serviceStatus = mockServerClient.getGoodServiceStatus();
-    serviceStatus.setState(ServiceState.UPGRADING);
-    Set<String> expectedInstances = new HashSet<>();
-    serviceStatus.getComponents().forEach(liveComp -> {
-      liveComp.setState(ComponentState.NEEDS_UPGRADE);
-      liveComp.getContainers().forEach(liveContainer -> {
-        expectedInstances.add(liveContainer.getComponentInstanceName());
-        liveContainer.setState(ContainerState.NEEDS_UPGRADE);
-      });
-    });
-    mockServerClient.setExpectedInstances(expectedInstances);
-
-    final Response actual = apiServer.updateComponents(request,
-        goodService.getName(), goodService.getComponents());
-    assertEquals("Component upgrade is ",
-        Response.status(Status.ACCEPTED).build().getStatus(),
-        actual.getStatus());
-  }
-}

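Every test in the deleted TestApiServer.java above follows the same pattern: mock the HttpServletRequest, call an ApiServer method directly, and compare the returned JAX-RS Response status. A minimal standalone sketch of that pattern, reusing the ApiServer and ServiceClientTest classes from this module (illustrative only, not part of this commit; it assumes the test jar carrying ServiceClientTest is on the classpath):

  import javax.servlet.http.HttpServletRequest;
  import javax.ws.rs.core.Response;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.service.ServiceClientTest;
  import org.apache.hadoop.yarn.service.webapp.ApiServer;
  import org.mockito.Mockito;

  public class ApiServerSmokeTest {
    public static void main(String[] args) throws Exception {
      // Impersonate the current user, exactly as the deleted setup() did.
      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
      Mockito.when(request.getRemoteUser())
          .thenReturn(System.getProperty("user.name"));
      ApiServer apiServer = new ApiServer(new Configuration());
      // ServiceClientTest is the stub client the tests above rely on;
      // it treats "jenkins" as an existing service.
      apiServer.setServiceClient(new ServiceClientTest());
      Response resp = apiServer.getService(request, "jenkins");
      System.out.println("GET service -> HTTP " + resp.getStatus());
    }
  }
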
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
deleted file mode 100644
index 6cf0880..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestApiServiceClient.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.service.client;
-
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.server.ServerConnector;
-import org.eclipse.jetty.servlet.ServletContextHandler;
-import org.eclipse.jetty.servlet.ServletHolder;
-import org.eclipse.jetty.util.thread.QueuedThreadPool;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.*;
-
-/**
- * Test cases for the CLI to the API Service.
- *
- */
-public class TestApiServiceClient {
-  private static ApiServiceClient asc;
-  private static ApiServiceClient badAsc;
-  private static Server server;
-
-  /**
-   * A mocked version of the API Service for testing purposes.
-   *
-   */
-  @SuppressWarnings("serial")
-  public static class TestServlet extends HttpServlet {
-
-    @Override
-    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
-        throws ServletException, IOException {
-      System.out.println("Get was called");
-      if (req.getPathInfo() != null
-          && req.getPathInfo().contains("nonexistent-app")) {
-        resp.setStatus(HttpServletResponse.SC_NOT_FOUND);
-      } else {
-        resp.setStatus(HttpServletResponse.SC_OK);
-      }
-    }
-
-    @Override
-    protected void doPost(HttpServletRequest req, HttpServletResponse resp)
-        throws ServletException, IOException {
-      resp.setStatus(HttpServletResponse.SC_OK);
-    }
-
-    @Override
-    protected void doPut(HttpServletRequest req, HttpServletResponse resp)
-        throws ServletException, IOException {
-      resp.setStatus(HttpServletResponse.SC_OK);
-    }
-
-    @Override
-    protected void doDelete(HttpServletRequest req, HttpServletResponse resp)
-        throws ServletException, IOException {
-      resp.setStatus(HttpServletResponse.SC_OK);
-    }
-
-  }
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    server = new Server(8088);
-    ((QueuedThreadPool)server.getThreadPool()).setMaxThreads(10);
-    ServletContextHandler context = new ServletContextHandler();
-    context.setContextPath("/app");
-    server.setHandler(context);
-    context.addServlet(new ServletHolder(TestServlet.class), "/*");
-    ((ServerConnector)server.getConnectors()[0]).setHost("localhost");
-    server.start();
-
-    Configuration conf = new Configuration();
-    conf.set("yarn.resourcemanager.webapp.address",
-        "localhost:8088");
-    asc = new ApiServiceClient();
-    asc.serviceInit(conf);
-
-    Configuration conf2 = new Configuration();
-    conf2.set("yarn.resourcemanager.webapp.address",
-        "localhost:8089");
-    badAsc = new ApiServiceClient();
-    badAsc.serviceInit(conf2);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    server.stop();
-  }
-
-  @Test
-  public void testLaunch() {
-    String fileName = "target/test-classes/example-app.json";
-    String appName = "example-app";
-    long lifetime = 3600L;
-    String queue = "default";
-    try {
-      int result = asc.actionLaunch(fileName, appName, lifetime, queue);
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testBadLaunch() {
-    String fileName = "unknown_file";
-    String appName = "unknown_app";
-    long lifetime = 3600L;
-    String queue = "default";
-    try {
-      int result = badAsc.actionLaunch(fileName, appName, lifetime, queue);
-      assertEquals(EXIT_EXCEPTION_THROWN, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testStatus() {
-    String appName = "nonexistent-app";
-    try {
-      String result = asc.getStatusString(appName);
-      assertEquals("Status reponse don't match",
-          " Service " + appName + " not found", result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testStop() {
-    String appName = "example-app";
-    try {
-      int result = asc.actionStop(appName);
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testBadStop() {
-    String appName = "unknown_app";
-    try {
-      int result = badAsc.actionStop(appName);
-      assertEquals(EXIT_EXCEPTION_THROWN, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testStart() {
-    String appName = "example-app";
-    try {
-      int result = asc.actionStart(appName);
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testBadStart() {
-    String appName = "unknown_app";
-    try {
-      int result = badAsc.actionStart(appName);
-      assertEquals(EXIT_EXCEPTION_THROWN, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testSave() {
-    String fileName = "target/test-classes/example-app.json";
-    String appName = "example-app";
-    long lifetime = 3600L;
-    String queue = "default";
-    try {
-      int result = asc.actionSave(fileName, appName, lifetime, queue);
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testBadSave() {
-    String fileName = "unknown_file";
-    String appName = "unknown_app";
-    long lifetime = 3600L;
-    String queue = "default";
-    try {
-      int result = badAsc.actionSave(fileName, appName, lifetime, queue);
-      assertEquals(EXIT_EXCEPTION_THROWN, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testFlex() {
-    String appName = "example-app";
-    HashMap<String, String> componentCounts = new HashMap<String, String>();
-    try {
-      int result = asc.actionFlex(appName, componentCounts);
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testBadFlex() {
-    String appName = "unknown_app";
-    HashMap<String, String> componentCounts = new HashMap<String, String>();
-    try {
-      int result = badAsc.actionFlex(appName, componentCounts);
-      assertEquals(EXIT_EXCEPTION_THROWN, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testDestroy() {
-    String appName = "example-app";
-    try {
-      int result = asc.actionDestroy(appName);
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testBadDestroy() {
-    String appName = "unknown_app";
-    try {
-      int result = badAsc.actionDestroy(appName);
-      assertEquals(EXIT_EXCEPTION_THROWN, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testInitiateServiceUpgrade() {
-    String appName = "example-app";
-    String upgradeFileName = "target/test-classes/example-app.json";
-    try {
-      int result = asc.initiateUpgrade(appName, upgradeFileName, false);
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testInstancesUpgrade() {
-    String appName = "example-app";
-    try {
-      int result = asc.actionUpgradeInstances(appName, Lists.newArrayList(
-          "comp-1", "comp-2"));
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-  @Test
-  public void testComponentsUpgrade() {
-    String appName = "example-app";
-    try {
-      int result = asc.actionUpgradeComponents(appName, Lists.newArrayList(
-          "comp"));
-      assertEquals(EXIT_SUCCESS, result);
-    } catch (IOException | YarnException e) {
-      fail();
-    }
-  }
-
-
-}

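The deleted TestApiServiceClient above stands up an embedded Jetty servlet on localhost:8088 to impersonate the RM webapp, then drives ApiServiceClient against it. Outside the test the wiring is the same three steps; here is a sketch using only calls that appear in the deleted file (the test invokes serviceInit() directly because it shares the client's package, so this sketch goes through the public init() instead):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.service.client.ApiServiceClient;

  public class LaunchExampleApp {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Point the client at the RM webapp, as the deleted setup() does.
      conf.set("yarn.resourcemanager.webapp.address", "localhost:8088");
      ApiServiceClient client = new ApiServiceClient();
      client.init(conf);
      // Launch the bundled example spec; the return value is a launcher
      // exit code (EXIT_SUCCESS on success, as asserted in testLaunch()).
      int exit = client.actionLaunch("target/test-classes/example-app.json",
          "example-app", 3600L, "default");
      System.out.println("actionLaunch exit code: " + exit);
    }
  }
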
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
deleted file mode 100644
index d39083d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Test class for system service manager.
- */
-public class TestSystemServiceManagerImpl {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestSystemServiceManagerImpl.class);
-  private SystemServiceManagerImpl systemService;
-  private Configuration conf;
-  private String resourcePath = "system-services";
-
-  private String[] users = new String[] {"user1", "user2"};
-  private static Map<String, Set<String>> loadedServices = new HashMap<>();
-  private static Map<String, Set<String>> submittedServices = new HashMap<>();
-
-  @Before
-  public void setup() {
-    File file = new File(
-        getClass().getClassLoader().getResource(resourcePath).getFile());
-    conf = new Configuration();
-    conf.set(YarnServiceConf.YARN_SERVICES_SYSTEM_SERVICE_DIRECTORY,
-        file.getAbsolutePath());
-    systemService = new SystemServiceManagerImpl() {
-      @Override ServiceClient getServiceClient() {
-        return new TestServiceClient();
-      }
-    };
-    systemService.init(conf); // do not call explicit start
-
-    constructUserService(users[0], "example-app1");
-    constructUserService(users[1], "example-app1", "example-app2");
-  }
-
-  @After
-  public void tearDown() {
-    systemService.stop();
-  }
-
-  @Test
-  public void testSystemServiceSubmission() throws Exception {
-    systemService.start();
-
-    /* verify the ignored services count */
-    Map<String, Integer> ignoredUserServices =
-        systemService.getIgnoredUserServices();
-    Assert.assertEquals(1, ignoredUserServices.size());
-    Assert.assertTrue("User user1 doesn't exist.",
-        ignoredUserServices.containsKey(users[0]));
-    int count = ignoredUserServices.get(users[0]);
-    Assert.assertEquals(1, count);
-    Assert.assertEquals(1,
-        systemService.getBadFileNameExtensionSkipCounter());
-    Assert.assertEquals(1, systemService.getBadDirSkipCounter());
-
-    Map<String, Set<Service>> userServices =
-        systemService.getSyncUserServices();
-    Assert.assertEquals(loadedServices.size(), userServices.size());
-    verifyForScannedUserServices(userServices);
-
-    verifyForLaunchedUserServices();
-
-    // Launch the services a second time to cover the already-exists scenario.
-    systemService.launchUserService(userServices);
-    verifyForLaunchedUserServices();
-  }
-
-  private void verifyForScannedUserServices(
-      Map<String, Set<Service>> userServices) {
-    for (String user : users) {
-      Set<Service> services = userServices.get(user);
-      Set<String> serviceNames = loadedServices.get(user);
-      Assert.assertEquals(serviceNames.size(), services.size());
-      Iterator<Service> iterator = services.iterator();
-      while (iterator.hasNext()) {
-        Service next = iterator.next();
-        Assert.assertTrue(
-            "Service name doesn't exist in expected userService "
-                + serviceNames, serviceNames.contains(next.getName()));
-      }
-    }
-  }
-
-  public void constructUserService(String user, String... serviceNames) {
-    Set<String> service = loadedServices.get(user);
-    if (service == null) {
-      service = new HashSet<>();
-      for (String serviceName : serviceNames) {
-        service.add(serviceName);
-      }
-      loadedServices.put(user, service);
-    }
-  }
-
-  class TestServiceClient extends ServiceClient {
-    @Override
-    protected void serviceStart() throws Exception {
-      // do nothing
-    }
-
-    @Override
-    protected void serviceStop() throws Exception {
-      // do nothing
-    }
-
-    @Override
-    protected void serviceInit(Configuration configuration)
-        throws Exception {
-      // do nothing
-    }
-
-    @Override
-    public ApplicationId actionCreate(Service service)
-        throws YarnException, IOException {
-      String userName =
-          UserGroupInformation.getCurrentUser().getShortUserName();
-      Set<String> services = submittedServices.get(userName);
-      if (services == null) {
-        services = new HashSet<>();
-        submittedServices.put(userName, services);
-      }
-      if (services.contains(service.getName())) {
-        String message = "Failed to create service " + service.getName()
-            + ", because it already exists.";
-        throw new YarnException(message);
-      }
-      services.add(service.getName());
-      return ApplicationId.newInstance(System.currentTimeMillis(), 1);
-    }
-  }
-
-  private void verifyForLaunchedUserServices() {
-    Assert.assertEquals(loadedServices.size(), submittedServices.size());
-    for (Map.Entry<String, Set<String>> entry : submittedServices.entrySet()) {
-      String user = entry.getKey();
-      Set<String> serviceSet = entry.getValue();
-      Assert.assertTrue(loadedServices.containsKey(user));
-      Set<String> services = loadedServices.get(user);
-      Assert.assertEquals(services.size(), serviceSet.size());
-      Assert.assertTrue(services.containsAll(serviceSet));
-    }
-  }
-}

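The assertions in testSystemServiceSubmission() above are driven by the test-resource tree that the rest of this diff deletes. Reconstructed from the resource paths in this commit, the layout that SystemServiceManagerImpl scans is:

  system-services/
    bad/                       <- unexpected directory: getBadDirSkipCounter() == 1
      bad.yarnfile
    sync/
      user1/
        example-app1.yarnfile
        example-app2.yarnfile  <- declares the duplicate name "example-app1":
                                  counted as user1's one ignored service
        example-app3.json      <- wrong extension:
                                  getBadFileNameExtensionSkipCounter() == 1
      user2/
        example-app1.yarnfile
        example-app2.yarnfile
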
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/example-app.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/example-app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/example-app.json
deleted file mode 100644
index a2f41cf..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/example-app.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "example-app",
-  "version": "1.0.0",
-  "components" :
-  [
-    {
-      "name": "simple",
-      "number_of_containers": 1,
-      "launch_command": "sleep 2",
-      "resource": {
-        "cpus": 1,
-        "memory": "128"
-      }
-    }
-  ]
-}
\ No newline at end of file

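The example-app.json spec above is the serialized form of the same api.records objects the deleted tests build by hand. For reference, a sketch constructing the equivalent spec programmatically with the setters seen throughout this diff:

  import java.util.Collections;

  import org.apache.hadoop.yarn.service.api.records.Component;
  import org.apache.hadoop.yarn.service.api.records.Resource;
  import org.apache.hadoop.yarn.service.api.records.Service;

  public class ExampleAppSpec {
    public static Service build() {
      Resource resource = new Resource();
      resource.setCpus(1);
      resource.setMemory("128");       // memory is a string, as in the JSON
      Component simple = new Component();
      simple.setName("simple");
      simple.setNumberOfContainers(1L);
      simple.setLaunchCommand("sleep 2");
      simple.setResource(resource);
      Service service = new Service();
      service.setName("example-app");
      service.setVersion("1.0.0");
      service.setComponents(Collections.singletonList(simple));
      return service;
    }
  }
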
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/log4j.properties
deleted file mode 100644
index 81a3f6a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,19 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=info,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
deleted file mode 100644
index 1d514d6..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "bad",
-  "version": "1.0.0",
-  "components" :
-  [
-    {
-      "name": "simple",
-      "number_of_containers": 1,
-      "launch_command": "sleep 2",
-      "resource": {
-        "cpus": 1,
-        "memory": "128"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
deleted file mode 100644
index 823561d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "example-app1",
-  "version": "1.0.0",
-  "components" :
-  [
-    {
-      "name": "simple",
-      "number_of_containers": 1,
-      "launch_command": "sleep 2",
-      "resource": {
-        "cpus": 1,
-        "memory": "128"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
deleted file mode 100644
index 823561d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "example-app1",
-  "version": "1.0.0",
-  "components" :
-  [
-    {
-      "name": "simple",
-      "number_of_containers": 1,
-      "launch_command": "sleep 2",
-      "resource": {
-        "cpus": 1,
-        "memory": "128"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
deleted file mode 100644
index 8a3a561..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "example-app3",
-  "version": "1.0.0",
-  "components" :
-  [
-    {
-      "name": "simple",
-      "number_of_containers": 1,
-      "launch_command": "sleep 2",
-      "resource": {
-        "cpus": 1,
-        "memory": "128"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
deleted file mode 100644
index 823561d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "example-app1",
-  "version": "1.0.0",
-  "components" :
-  [
-    {
-      "name": "simple",
-      "number_of_containers": 1,
-      "launch_command": "sleep 2",
-      "resource": {
-        "cpus": 1,
-        "memory": "128"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
deleted file mode 100644
index d8fd1d1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "example-app2",
-  "version": "1.0.0",
-  "components" :
-  [
-    {
-      "name": "simple",
-      "number_of_containers": 1,
-      "launch_command": "sleep 2",
-      "resource": {
-        "cpus": 1,
-        "memory": "128"
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
new file mode 100644
index 0000000..b89146a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+
+</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
new file mode 100644
index 0000000..45168a9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/pom.xml
@@ -0,0 +1,144 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-yarn-services</artifactId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-yarn-services-api</artifactId>
+  <name>Apache Hadoop YARN Services API</name>
+  <packaging>jar</packaging>
+  <description>Hadoop YARN REST APIs for services</description>
+
+  <build>
+
+    <!-- resources are filtered for dynamic updates. This gets build info in-->
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+        <filtering>true</filtering>
+      </resource>
+      <resource>
+        <directory>src/main/scripts/</directory>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
+
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <!-- The configuration of the plugin -->
+        <configuration>
+          <!-- Configuration of the archiver -->
+          <archive>
+            <manifestEntries>
+              <mode>development</mode>
+              <url>${project.url}</url>
+            </manifestEntries>
+            <!-- Manifest specific configuration -->
+            <manifest>
+            </manifest>
+          </archive>
+        </configuration>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>**/*.json</exclude>
+            <exclude>**/*.yarnfile</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <reporting>
+  </reporting>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-services-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-webapp</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>javax.ws.rs</groupId>
+      <artifactId>jsr311-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <!-- ======================================================== -->
+    <!-- Test dependencies -->
+    <!-- ======================================================== -->
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
new file mode 100644
index 0000000..a8e2f51
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java
@@ -0,0 +1,598 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.client;
+
+import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.core.MediaType;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.client.api.AppAdminClient;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.ComponentState;
+import org.apache.hadoop.yarn.service.api.records.Container;
+import org.apache.hadoop.yarn.service.api.records.ContainerState;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
+import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
+import org.apache.hadoop.yarn.service.conf.RestApiConstants;
+import org.apache.hadoop.yarn.service.utils.JsonSerDeser;
+import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
+import org.apache.hadoop.yarn.util.RMHAUtils;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
+import org.eclipse.jetty.util.UrlEncoded;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.WebResource.Builder;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+
+import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.*;
+
+/**
+ * The rest API client for users to manage services on YARN.
+ */
+public class ApiServiceClient extends AppAdminClient {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ApiServiceClient.class);
+  protected YarnClient yarnClient;
+
+  @Override protected void serviceInit(Configuration configuration)
+      throws Exception {
+    yarnClient = YarnClient.createYarnClient();
+    addService(yarnClient);
+    super.serviceInit(configuration);
+  }
+
+  /**
+   * Calculate the Resource Manager address based on a working REST API.
+   */
+  private String getRMWebAddress() {
+    Configuration conf = getConfig();
+    String scheme = "http://";
+    String path = "/app/v1/services/version";
+    String rmAddress = conf
+        .get("yarn.resourcemanager.webapp.address");
+    if (YarnConfiguration.useHttps(conf)) {
+      scheme = "https://";
+      rmAddress = conf
+          .get("yarn.resourcemanager.webapp.https.address");
+    }
+    boolean useKerberos = UserGroupInformation.isSecurityEnabled();
+    List<String> rmServers = RMHAUtils
+        .getRMHAWebappAddresses(new YarnConfiguration(conf));
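+    // Probe each candidate RM webapp address; keep the first one whose
+    // REST API answers the version endpoint.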
+    for (String host : rmServers) {
+      try {
+        Client client = Client.create();
+        StringBuilder sb = new StringBuilder();
+        sb.append(scheme);
+        sb.append(host);
+        sb.append(path);
+        if (!useKerberos) {
+          try {
+            String username = UserGroupInformation.getCurrentUser()
+                .getShortUserName();
+            sb.append("?user.name=");
+            sb.append(username);
+          } catch (IOException e) {
+            LOG.debug("Failed to resolve username", e);
+          }
+        }
+        WebResource webResource = client
+            .resource(sb.toString());
+        if (useKerberos) {
+          AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+          webResource.header("WWW-Authenticate", token);
+        }
+        ClientResponse test = webResource.get(ClientResponse.class);
+        if (test.getStatus() == 200) {
+          rmAddress = host;
+          break;
+        }
+      } catch (Exception e) {
+        LOG.debug("Fail to connect to: "+host, e);
+      }
+    }
+    return scheme + rmAddress;
+  }
+
+  /**
+   * Compute active resource manager API service location.
+   *
+   * @param appName - YARN service name
+   * @return URI to API Service
+   * @throws IOException
+   */
+  private String getServicePath(String appName) throws IOException {
+    String url = getRMWebAddress();
+    StringBuilder api = new StringBuilder();
+    api.append(url);
+    api.append("/app/v1/services");
+    if (appName != null) {
+      api.append("/");
+      api.append(appName);
+    }
+    Configuration conf = getConfig();
+    if ("simple".equalsIgnoreCase(
+        conf.get("hadoop.http.authentication.type"))) {
+      api.append("?user.name=" + UrlEncoded
+          .encodeString(System.getProperty("user.name")));
+    }
+    return api.toString();
+  }
+
+  private String getInstancesPath(String appName) throws IOException {
+    Preconditions.checkNotNull(appName);
+    String url = getRMWebAddress();
+    StringBuilder api = new StringBuilder();
+    api.append(url);
+    api.append("/app/v1/services/").append(appName).append("/")
+        .append(RestApiConstants.COMP_INSTANCES);
+    Configuration conf = getConfig();
+    if ("simple".equalsIgnoreCase(
+        conf.get("hadoop.http.authentication.type"))) {
+      api.append("?user.name=" + UrlEncoded
+          .encodeString(System.getProperty("user.name")));
+    }
+    return api.toString();
+  }
+
+  private String getComponentsPath(String appName) throws IOException {
+    Preconditions.checkNotNull(appName);
+    String url = getRMWebAddress();
+    StringBuilder api = new StringBuilder();
+    api.append(url);
+    api.append("/app/v1/services/").append(appName).append("/")
+        .append(RestApiConstants.COMPONENTS);
+    Configuration conf = getConfig();
+    if ("simple".equalsIgnoreCase(
+        conf.get("hadoop.http.authentication.type"))) {
+      api.append("?user.name=" + UrlEncoded
+          .encodeString(System.getProperty("user.name")));
+    }
+    return api.toString();
+  }
+
+  private Builder getApiClient() throws IOException {
+    return getApiClient(getServicePath(null));
+  }
+
+  /**
+   * Set up an API service web request.
+   *
+   * @param requestPath resource path for the request
+   * @return a JSON request builder for the given path
+   * @throws IOException
+   */
+  private Builder getApiClient(String requestPath)
+      throws IOException {
+    Client client = Client.create(getClientConfig());
+    Configuration conf = getConfig();
+    client.setChunkedEncodingSize(null);
+    Builder builder = client
+        .resource(requestPath).type(MediaType.APPLICATION_JSON);
+    if ("kerberos".equals(
+        conf.get("hadoop.http.authentication.type"))) {
+      AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+      builder.header("WWW-Authenticate", token);
+    }
+    return builder
+        .accept("application/json;charset=utf-8");
+  }
+
+  private ClientConfig getClientConfig() {
+    ClientConfig config = new DefaultClientConfig();
+    config.getProperties().put(
+        ClientConfig.PROPERTY_CHUNKED_ENCODING_SIZE, 0);
+    config.getProperties().put(
+        ClientConfig.PROPERTY_BUFFER_RESPONSE_ENTITY_ON_EXCEPTION, true);
+    return config;
+  }
+
+  private int processResponse(ClientResponse response) {
+    response.bufferEntity();
+    String output;
+    if (response.getStatus() == 401) {
+      LOG.error("Authentication required");
+      return EXIT_EXCEPTION_THROWN;
+    }
+    if (response.getStatus() == 503) {
+      LOG.error("YARN Service is unavailable or disabled.");
+      return EXIT_EXCEPTION_THROWN;
+    }
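+    // Prefer the structured diagnostics from ServiceStatus; fall back to
+    // the raw response body if the entity cannot be deserialized.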
+    try {
+      ServiceStatus ss = response.getEntity(ServiceStatus.class);
+      output = ss.getDiagnostics();
+    } catch (Throwable t) {
+      output = response.getEntity(String.class);
+    }
+    if (output == null) {
+      output = response.getEntity(String.class);
+    }
+    if (response.getStatus() <= 299) {
+      LOG.info(output);
+      return EXIT_SUCCESS;
+    } else {
+      LOG.error(output);
+      return EXIT_EXCEPTION_THROWN;
+    }
+  }
+
+  /**
+   * Utility method to load a Service JSON definition from disk or from
+   * the YARN examples.
+   *
+   * @param fileName - path to yarnfile
+   * @param serviceName - YARN Service Name
+   * @param lifetime - application lifetime
+   * @param queue - Queue to submit application
+   * @return the loaded service definition
+   * @throws IOException
+   * @throws YarnException
+   */
+  public Service loadAppJsonFromLocalFS(String fileName, String serviceName,
+      Long lifetime, String queue) throws IOException, YarnException {
+    File file = new File(fileName);
+    if (!file.exists() && fileName.equals(file.getName())) {
+      String examplesDirStr = System.getenv("YARN_SERVICE_EXAMPLES_DIR");
+      String[] examplesDirs;
+      if (examplesDirStr == null) {
+        String yarnHome = System
+            .getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key());
+        examplesDirs = new String[]{
+            yarnHome + "/share/hadoop/yarn/yarn-service-examples",
+            yarnHome + "/yarn-service-examples"
+        };
+      } else {
+        examplesDirs = StringUtils.split(examplesDirStr, ":");
+      }
+      for (String dir : examplesDirs) {
+        file = new File(MessageFormat.format("{0}/{1}/{2}.json",
+            dir, fileName, fileName));
+        if (file.exists()) {
+          break;
+        }
+        // Then look for secondary location.
+        file = new File(MessageFormat.format("{0}/{1}.json",
+            dir, fileName));
+        if (file.exists()) {
+          break;
+        }
+      }
+    }
+    if (!file.exists()) {
+      throw new YarnException("File or example could not be found: " +
+          fileName);
+    }
+    Path filePath = new Path(file.getAbsolutePath());
+    LOG.info("Loading service definition from local FS: " + filePath);
+    Service service = jsonSerDeser
+        .load(FileSystem.getLocal(getConfig()), filePath);
+    if (!StringUtils.isEmpty(serviceName)) {
+      service.setName(serviceName);
+    }
+    if (lifetime != null && lifetime > 0) {
+      service.setLifetime(lifetime);
+    }
+    if (!StringUtils.isEmpty(queue)) {
+      service.setQueue(queue);
+    }
+    return service;
+  }
+
+  /**
+   * Launch YARN service application.
+   *
+   * @param fileName - path to yarnfile
+   * @param appName - YARN Service Name
+   * @param lifetime - application lifetime
+   * @param queue - Queue to submit application
+   */
+  @Override
+  public int actionLaunch(String fileName, String appName, Long lifetime,
+      String queue) throws IOException, YarnException {
+    int result = EXIT_SUCCESS;
+    try {
+      Service service =
+          loadAppJsonFromLocalFS(fileName, appName, lifetime, queue);
+      String buffer = jsonSerDeser.toJson(service);
+      ClientResponse response = getApiClient()
+          .post(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Fail to launch application: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  /**
+   * Stop YARN service application.
+   *
+   * @param appName - YARN Service Name
+   */
+  @Override
+  public int actionStop(String appName) throws IOException, YarnException {
+    int result = EXIT_SUCCESS;
+    try {
+      Service service = new Service();
+      service.setName(appName);
+      service.setState(ServiceState.STOPPED);
+      String buffer = jsonSerDeser.toJson(service);
+      ClientResponse response = getApiClient(getServicePath(appName))
+          .put(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Fail to stop application: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  /**
+   * Start YARN service application.
+   *
+   * @param appName - YARN Service Name
+   */
+  @Override
+  public int actionStart(String appName) throws IOException, YarnException {
+    int result = EXIT_SUCCESS;
+    try {
+      Service service = new Service();
+      service.setName(appName);
+      service.setState(ServiceState.STARTED);
+      String buffer = jsonSerDeser.toJson(service);
+      ClientResponse response = getApiClient(getServicePath(appName))
+          .put(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Fail to start application: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  /**
+   * Save Service configuration.
+   *
+   * @param fileName - path to Yarnfile
+   * @param appName - YARN Service Name
+   * @param lifetime - container life time
+   * @param queue - Queue to submit the application
+   */
+  @Override
+  public int actionSave(String fileName, String appName, Long lifetime,
+      String queue) throws IOException, YarnException {
+    int result = EXIT_SUCCESS;
+    try {
+      Service service =
+          loadAppJsonFromLocalFS(fileName, appName, lifetime, queue);
+      service.setState(ServiceState.STOPPED);
+      String buffer = jsonSerDeser.toJson(service);
+      ClientResponse response = getApiClient()
+          .post(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Fail to save application: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  /**
+   * Destroy (permanently remove) a YARN service.
+   *
+   * @param appName - YARN Service Name
+   */
+  @Override
+  public int actionDestroy(String appName) throws IOException, YarnException {
+    int result = EXIT_SUCCESS;
+    try {
+      ClientResponse response = getApiClient(getServicePath(appName))
+          .delete(ClientResponse.class);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Fail to destroy application: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  /**
+   * Change number of containers associated with a service.
+   *
+   * @param appName - YARN Service Name
+   * @param componentCounts - list of components and desired container count
+   */
+  @Override
+  public int actionFlex(String appName, Map<String, String> componentCounts)
+      throws IOException, YarnException {
+    int result = EXIT_SUCCESS;
+    try {
+      Service service = new Service();
+      service.setName(appName);
+      service.setState(ServiceState.FLEX);
+      for (Map.Entry<String, String> entry : componentCounts.entrySet()) {
+        Component component = new Component();
+        component.setName(entry.getKey());
+        Long numberOfContainers = Long.parseLong(entry.getValue());
+        component.setNumberOfContainers(numberOfContainers);
+        service.addComponent(component);
+      }
+      String buffer = jsonSerDeser.toJson(service);
+      ClientResponse response = getApiClient(getServicePath(appName))
+          .put(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Fail to flex application: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  @Override
+  public int enableFastLaunch(String destinationFolder)
+      throws IOException, YarnException {
+    ServiceClient sc = new ServiceClient();
+    sc.init(getConfig());
+    sc.start();
+    int result = sc.enableFastLaunch(destinationFolder);
+    sc.close();
+    return result;
+  }
+
+  /**
+   * Retrieve Service Status through REST API.
+   *
+   * @param appIdOrName - YARN application ID or application name
+   * @return Status output
+   */
+  @Override
+  public String getStatusString(String appIdOrName) throws IOException,
+      YarnException {
+    String output = "";
+    String appName;
+    try {
+      ApplicationId appId = ApplicationId.fromString(appIdOrName);
+      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
+      appName = appReport.getName();
+    } catch (IllegalArgumentException e) {
+      // Not in application ID format; it may be an application name.
+      appName = appIdOrName;
+      ServiceApiUtil.validateNameFormat(appName, getConfig());
+    }
+    try {
+      ClientResponse response = getApiClient(getServicePath(appName))
+          .get(ClientResponse.class);
+      if (response.getStatus() == 404) {
+        StringBuilder sb = new StringBuilder();
+        sb.append(" Service ");
+        sb.append(appName);
+        sb.append(" not found");
+        return sb.toString();
+      }
+      if (response.getStatus() != 200) {
+        StringBuilder sb = new StringBuilder();
+        sb.append(appName);
+        sb.append(" Failed : HTTP error code : ");
+        sb.append(response.getStatus());
+        return sb.toString();
+      }
+      output = response.getEntity(String.class);
+    } catch (Exception e) {
+      LOG.error("Fail to check application status: ", e);
+    }
+    return output;
+  }
+
+  @Override
+  public int initiateUpgrade(String appName,
+      String fileName, boolean autoFinalize) throws IOException, YarnException {
+    int result;
+    try {
+      Service service =
+          loadAppJsonFromLocalFS(fileName, appName, null, null);
+      if (autoFinalize) {
+        service.setState(ServiceState.UPGRADING_AUTO_FINALIZE);
+      } else {
+        service.setState(ServiceState.UPGRADING);
+      }
+      String buffer = jsonSerDeser.toJson(service);
+      ClientResponse response = getApiClient(getServicePath(appName))
+          .put(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Failed to upgrade application: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  @Override
+  public int actionUpgradeInstances(String appName, List<String> compInstances)
+      throws IOException, YarnException {
+    int result;
+    Container[] toUpgrade = new Container[compInstances.size()];
+    try {
+      int idx = 0;
+      for (String instanceName : compInstances) {
+        Container container = new Container();
+        container.setComponentInstanceName(instanceName);
+        container.setState(ContainerState.UPGRADING);
+        toUpgrade[idx++] = container;
+      }
+      String buffer = CONTAINER_JSON_SERDE.toJson(toUpgrade);
+      ClientResponse response = getApiClient(getInstancesPath(appName))
+          .put(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Failed to upgrade component instance: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  @Override
+  public int actionUpgradeComponents(String appName, List<String> components)
+      throws IOException, YarnException {
+    int result;
+    Component[] toUpgrade = new Component[components.size()];
+    try {
+      int idx = 0;
+      for (String compName : components) {
+        Component component = new Component();
+        component.setName(compName);
+        component.setState(ComponentState.UPGRADING);
+        toUpgrade[idx++] = component;
+      }
+      String buffer = COMP_JSON_SERDE.toJson(toUpgrade);
+      ClientResponse response = getApiClient(getComponentsPath(appName))
+          .put(ClientResponse.class, buffer);
+      result = processResponse(response);
+    } catch (Exception e) {
+      LOG.error("Failed to upgrade components: ", e);
+      result = EXIT_EXCEPTION_THROWN;
+    }
+    return result;
+  }
+
+  private static final JsonSerDeser<Container[]> CONTAINER_JSON_SERDE =
+      new JsonSerDeser<>(Container[].class,
+          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
+
+  private static final JsonSerDeser<Component[]> COMP_JSON_SERDE =
+      new JsonSerDeser<>(Component[].class,
+          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
+}
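
For reference, a minimal, hypothetical usage sketch of this client through the
generic AppAdminClient entry point. The factory method
AppAdminClient.createAppAdminClient and the "yarn-service" type string are
assumptions drawn from the YARN client API rather than from this diff; only
the actionLaunch signature is taken from the code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.AppAdminClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LaunchSleeperExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    // Assumption: "yarn-service" selects a REST-backed client such as
    // ApiServiceClient when the cluster is configured for it.
    AppAdminClient client =
        AppAdminClient.createAppAdminClient("yarn-service", conf);
    try {
      // "sleeper" is resolved against YARN_SERVICE_EXAMPLES_DIR when it is
      // not an existing file path (see loadAppJsonFromLocalFS above).
      int exitCode = client.actionLaunch("sleeper", "my-sleeper", 3600L, null);
      System.exit(exitCode);
    } finally {
      client.stop();
    }
  }
}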




[03/50] [abbrv] hadoop git commit: HDFS-13558. TestDatanodeHttpXFrame does not shut down cluster. Contributed by Anbang Hu.

Posted by ar...@apache.org.
HDFS-13558. TestDatanodeHttpXFrame does not shut down cluster. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26f1e22f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26f1e22f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26f1e22f

Branch: refs/heads/HDDS-48
Commit: 26f1e22fc9ee326e9c76503d347552faeb6c2d3b
Parents: 328f084
Author: Inigo Goiri <in...@apache.org>
Authored: Thu May 17 13:35:09 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu May 17 13:35:09 2018 -0700

----------------------------------------------------------------------
 .../datanode/web/TestDatanodeHttpXFrame.java    | 30 ++++++++++++++------
 1 file changed, 21 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26f1e22f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java
index 9ecd8ea..62827a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.http.HttpServer2;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -36,13 +37,24 @@ import java.net.URL;
  * Test that X-Frame-Options works correctly with DatanodeHTTPServer.
  */
 public class TestDatanodeHttpXFrame {
+
+  private MiniDFSCluster cluster = null;
+
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
+  @After
+  public void cleanUp() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
   @Test
   public void testDataNodeXFrameOptionsEnabled() throws Exception {
     boolean xFrameEnabled = true;
-    MiniDFSCluster cluster = createCluster(xFrameEnabled, null);
+    cluster = createCluster(xFrameEnabled, null);
     HttpURLConnection conn = getConn(cluster);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
     Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
@@ -54,7 +66,7 @@ public class TestDatanodeHttpXFrame {
   @Test
   public void testNameNodeXFrameOptionsDisabled() throws Exception {
     boolean xFrameEnabled = false;
-    MiniDFSCluster cluster = createCluster(xFrameEnabled, null);
+    cluster = createCluster(xFrameEnabled, null);
     HttpURLConnection conn = getConn(cluster);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
     Assert.assertTrue("unexpected X-FRAME-OPTION in header", xfoHeader == null);
@@ -63,25 +75,25 @@ public class TestDatanodeHttpXFrame {
   @Test
   public void testDataNodeXFramewithInvalidOptions() throws Exception {
     exception.expect(IllegalArgumentException.class);
-    createCluster(false, "Hadoop");
+    cluster = createCluster(false, "Hadoop");
   }
 
-  private MiniDFSCluster createCluster(boolean enabled, String
+  private static MiniDFSCluster createCluster(boolean enabled, String
       value) throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED, enabled);
     if (value != null) {
       conf.set(DFSConfigKeys.DFS_XFRAME_OPTION_VALUE, value);
     }
-    MiniDFSCluster cluster =
+    MiniDFSCluster dfsCluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-    cluster.waitActive();
-    return cluster;
+    dfsCluster.waitActive();
+    return dfsCluster;
   }
 
-  private HttpURLConnection getConn(MiniDFSCluster cluster)
+  private static HttpURLConnection getConn(MiniDFSCluster dfsCluster)
       throws IOException {
-    DataNode datanode = cluster.getDataNodes().get(0);
+    DataNode datanode = dfsCluster.getDataNodes().get(0);
     URL newURL = new URL("http://localhost:" + datanode.getInfoPort());
     HttpURLConnection conn = (HttpURLConnection) newURL.openConnection();
     conn.connect();
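
Shown standalone under illustrative names, this is the cleanup pattern the
patch applies: hold the MiniDFSCluster in a field and shut it down in an
@After method, so a failing assertion cannot leak the cluster.

import java.io.IOException;

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Test;

public class TestClusterCleanupPattern {
  private MiniDFSCluster cluster;

  @After
  public void cleanUp() {
    // Runs even when the test body throws, so the cluster never leaks.
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }

  @Test
  public void testAgainstRunningCluster() throws IOException {
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .numDataNodes(1).build();
    cluster.waitActive();
    // ... assertions against the running cluster ...
  }
}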




[48/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
new file mode 100644
index 0000000..b73a769
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
@@ -0,0 +1,33 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<nav class="navbar navbar-inverse navbar-fixed-top">
+  <div class="container-fluid">
+    <div class="navbar-header">
+      <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
+        <span class="sr-only">Toggle navigation</span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+      </button>
+      <a class="navbar-brand" href="#">Apache Hadoop Ozone/HDDS documentation</a>
+    </div>
+    <div id="navbar" class="navbar-collapse collapse">
+      <ul class="nav navbar-nav navbar-right">
+        <li><a href="https://github.com/apache/hadoop">Source</a></li>
+        <li><a href="https://hadoop.apache.org">Apache Hadoop</a></li>
+        <li><a href="https://apache.org">ASF</a></li>
+      </ul>
+    </div>
+  </div>
+</nav>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
new file mode 100644
index 0000000..b043911
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
@@ -0,0 +1,43 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<div class="col-sm-3 col-md-2 sidebar">
+  <ul class="nav nav-sidebar">
+    {{ $currentPage := . }}
+    {{ range .Site.Menus.main }}
+        {{ if .HasChildren }}
+            <li class="{{ if $currentPage.IsMenuCurrent "main" . }}active{{ end }}">
+                <a href="{{ .URL }}">
+                    {{ .Pre }}
+                    <span>{{ .Name }}</span>
+                </a>
+                <ul class="nav">
+                    {{ range .Children }}
+                        <li class="{{ if $currentPage.IsMenuCurrent "main" . }}active{{ end }}">
+                        <a href="{{ .URL }}">{{ .Name }}</a>
+                        </li>
+                    {{ end }}
+                </ul>
+            </li>
+        {{ else }}
+            <li class="{{ if $currentPage.IsMenuCurrent "main" . }}active{{ end }}">
+                <a href="{{ .URL }}">
+                    {{ .Pre }}
+                    <span>{{ .Name }}</span>
+                </a>
+            </li>
+        {{ end }}
+    {{ end }}
+  </ul>
+
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
new file mode 100644
index 0000000..5e39401
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+/* [~25 KB of minified Bootstrap v3.3.7 theme CSS elided here: the original single-line stylesheet was hard-wrapped and corrupted by the mail archive and is not recoverable from this copy] */
+/*# sourceMappingURL=bootstrap-theme.min.css.map */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
new file mode 100644
index 0000000..94813e9
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":";;;;AAmBA,YAAA,aAAA,UAAA,aAAA,aAAA,aAME,YAAA,EAAA,KAAA,EAAA,eC2CA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBDvCR,mBAAA,mBAAA,oBAAA,oBAAA,iBAAA,iBAAA,oBAAA,oBAAA,oBAAA,oBAAA,oBAAA,oBCsCA,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBDlCR,qBAAA,sBAAA,sBAAA,uBAAA,mBAAA,oBAAA,sBAAA,uBAAA,sBAAA,uBAAA,sBAAA,uBAAA,+BAAA,gCAAA,6BAAA,gCAAA,gCAAA,gCCiCA,mBAAA,KACQ,WAAA,KDlDV,mBAAA,oBAAA,iBAAA,oBAAA,oBAAA,oBAuBI,YAAA,KAyCF,YAAA,YAEE,iBAAA,KAKJ,aErEI,YAAA,EAAA,IAAA,EAAA,KACA,iBAAA,iDACA,iBAAA,4CAAA,iBAAA,qEAEA,iBAAA,+CCnBF,OAAA,+GH4CA,OAAA,0DACA,kBAAA,SAuC2C,aAAA,QAA2B,aAAA,KArCtE,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,Q
 ACA,iBAAA,KAgBN,aEtEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAiBN,aEvEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAkBN,UExEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,gBAAA,gBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,iBAAA,iBAEE,iBAAA,QACA,aAAA,QAMA,mBAAA,0BAAA,yBAAA,0BAAA,yBAAA,yBAAA,oBAAA,2BAAA,0BAAA,2BAAA,0BAAA,0BAAA,6BAAA,oCAAA,mCAAA,oCAAA,mCAAA,mCAME,iBAAA,QACA,iBAAA,KAmBN,aEzEI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4
 CA,kBAAA,SACA,aAAA,QAEA,mBAAA,mBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,oBAAA,oBAEE,iBAAA,QACA,aAAA,QAMA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,uBAAA,8BAAA,6BAAA,8BAAA,6BAAA,6BAAA,gCAAA,uCAAA,sCAAA,uCAAA,sCAAA,sCAME,iBAAA,QACA,iBAAA,KAoBN,YE1EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDAEA,OAAA,+GCnBF,OAAA,0DH4CA,kBAAA,SACA,aAAA,QAEA,kBAAA,kBAEE,iBAAA,QACA,oBAAA,EAAA,MAGF,mBAAA,mBAEE,iBAAA,QACA,aAAA,QAMA,qBAAA,4BAAA,2BAAA,4BAAA,2BAAA,2BAAA,sBAAA,6BAAA,4BAAA,6BAAA,4BAAA,4BAAA,+BAAA,sCAAA,qCAAA,sCAAA,qCAAA,qCAME,iBAAA,QACA,iBAAA,KA2BN,eAAA,WClCE,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBD2CV,0BAAA,0BE3FI,iBAAA,QACA,iBAAA,oDACA,iBAAA,+CAAA,iBAAA,wEACA,iBAAA,kDACA,OAAA,+GF0FF,kBAAA,SAEF,yBAAA,+BAAA,+BEhGI,iBAAA,QACA,iBAAA,oDACA,iBAAA,+CAAA,iBAAA,wEACA,iBAAA,kDACA,OAAA,+GFgGF,kBAAA,SASF,gBE7GI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,OAAA,0DCnBF,kBAAA,SH+HA,cAAA,ICjEA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,iBD6DV,
 sCAAA,oCE7GI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD2CF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBD0EV,cAAA,iBAEE,YAAA,EAAA,IAAA,EAAA,sBAIF,gBEhII,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,OAAA,0DCnBF,kBAAA,SHkJA,cAAA,IAHF,sCAAA,oCEhII,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD2CF,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBDgFV,8BAAA,iCAYI,YAAA,EAAA,KAAA,EAAA,gBAKJ,qBAAA,kBAAA,mBAGE,cAAA,EAqBF,yBAfI,mDAAA,yDAAA,yDAGE,MAAA,KE7JF,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,UFqKJ,OACE,YAAA,EAAA,IAAA,EAAA,qBC3HA,mBAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,sBAAA,EAAA,IAAA,IAAA,gBDsIV,eEtLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAKF,YEvLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAMF,eExLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aA
 AA,QAOF,cEzLI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8KF,aAAA,QAeF,UEjMI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFuMJ,cE3MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFwMJ,sBE5MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFyMJ,mBE7MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF0MJ,sBE9MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF2MJ,qBE/MI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+MJ,sBElLI,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKFyLJ,YACE,cAAA,IC9KA,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBDgLV,wBAAA,8BAAA,8BAGE,YAAA,EAAA,KAAA,EAAA,QEnOE,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiOF,aAAA,QALF,+BAAA,qCAAA,qCAQI,YAAA,KAUJ,OCnME,mBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,EAAA,IAAA,IAAA,gBD4MV,8BE5PI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFyPJ,8BE7PI,iBAAA,oDACA,iBAAA,+CACA,
 iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF0PJ,8BE9PI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF2PJ,2BE/PI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF4PJ,8BEhQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF6PJ,6BEjQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoQJ,MExQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFsQF,aAAA,QC3NA,mBAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,EAAA,IAAA,EAAA,qBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBAAA,EAAA,IAAA,EAAA","sourcesContent":["/*!\n * Bootstrap v3.3.7 (http://getbootstrap.com)\n * Copyright 2011-2016 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-defa
 ult,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0,0,0,.2);\n  @shadow: inset 0 1px 0 rgba(255,255,255,.15), 0 1px 1px rgba(0,0,0,.075);\n  .box-shadow(@shadow);\n\n  // Reset the shadow\n  &:active,\n  &.active {\n    .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    .box-shadow(none);\n  }\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n  #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n  .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners; see https://github.com/twbs/bootstrap/issues/10620\n  background-repeat: repeat-x;\n  border-color: darken(@btn-color, 14%);\n\n  &:hover,\n  &:focus  {\n    background-color: darken(@btn-color, 12%);\n    background-position: 0 -15px;\n  }\n\n  &:active,\n  &.active {\n    bac
 kground-color: darken(@btn-color, 12%);\n    border-color: darken(@btn-color, 14%);\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &.focus,\n    &:active,\n    &.active {\n      background-color: darken(@btn-color, 12%);\n      background-image: none;\n    }\n  }\n}\n\n// Common styles\n.btn {\n  // Remove the gradient for the pressed/active state\n  &:active,\n  &.active {\n    background-image: none;\n  }\n}\n\n// Apply the mixin to the buttons\n.btn-default { .btn-styles(@btn-default-bg); text-shadow: 0 1px 0 #fff; border-color: #ccc; }\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info    { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger  { .btn-styles(@btn-danger-bg); }\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n  .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n\n
 \n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n  background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n  background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n  border-radius: @navbar-border-radius;\n  @shadow: inset 0 1px 0 rgba(255,255,25
 5,.15), 0 1px 5px rgba(0,0,0,.075);\n  .box-shadow(@shadow);\n\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: darken(@navbar-default-link-active-bg, 5%); @end-color: darken(@navbar-default-link-active-bg, 2%));\n    .box-shadow(inset 0 3px 9px rgba(0,0,0,.075));\n  }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255,255,255,.25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n  #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered; see https://github.com/twbs/bootstrap/issues/10257\n  border-radius: @navbar-border-radius;\n  .navbar-nav > .open > a,\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: @navbar-inverse-link-active-bg; @end-color: lighten(@navbar-inverse-link-active-bg, 2.5%));\n    .box-shadow(inset 0 3px 9px rgba(0,0,0,.25));\n  }\
 n\n  .navbar-brand,\n  .navbar-nav > li > a {\n    text-shadow: 0 -1px 0 rgba(0,0,0,.25);\n  }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n\n// Fix active state of dropdown items in collapsed mode\n@media (max-width: @grid-float-breakpoint-max) {\n  .navbar .navbar-nav .open .dropdown-menu > .active > a {\n    &,\n    &:hover,\n    &:focus {\n      color: #fff;\n      #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n    }\n  }\n}\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n  text-shadow: 0 1px 0 rgba(255,255,255,.2);\n  @shadow: inset 0 1px 0 rgba(255,255,255,.25), 0 1px 2px rgba(0,0,0,.05);\n  .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5
 %));\n  border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success    { .alert-styles(@alert-success-bg); }\n.alert-info       { .alert-styles(@alert-info-bg); }\n.alert-warning    { .alert-styles(@alert-warning-bg); }\n.alert-danger     { .alert-styles(@alert-danger-bg); }\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n  #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar            { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success    { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info       { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning    { .progress
 -bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger     { .progress-bar-styles(@progress-bar-danger-bg); }\n\n// Reset the striped class because our mixins don't do multiple gradients and\n// the above custom styles override the new `.progress-bar-striped` in v3.2.0.\n.progress-bar-striped {\n  #gradient > .striped();\n}\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n  border-radius: @border-radius-base;\n  .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n  #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n  border-color: darken(@list-group-active-border, 7.5%);\n\n  .badge {\n    text-shadow: none;\n  }\n}\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n  .box-s
 hadow(0 1px 2px rgba(0,0,0,.05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the mixin to the panel headings only\n.panel-default > .panel-heading   { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading   { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading   { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading      { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading   { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading    { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n  #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n  border-color: darken(@well-bg, 10%);\n  @shadow: inset 0
  1px 3px rgba(0,0,0,.05), 0 1px 0 rgba(255,255,255,.1);\n  .box-shadow(@shadow);\n}\n","// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n       -o-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: @name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay
 (@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n  -webkit-animation-fill-mode: @fill-mode;\n          animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now su
 pport it.\n\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  word-wrap: break-word;\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  // Firefox\n  &::-moz-placeholder {\n    color: @color;\n    opacity: 1; // Override Firefox
 's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n  }\n  &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n  -webkit-transform: scale(@ratio);\n      -ms-transform: scale(@ratio); // IE9 only\n       -o-transform: scale(@ratio);\n          transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n  -webkit-transform: scale(@ratioX, @ratioY);\n      -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n       -o-transform: scale(@ratioX, @ratioY);\n          transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n  -webkit-transform: scaleX(@ratio);\n      -ms-transform: scaleX(@ratio); // IE9 only\n       -o-transform: scaleX(@ratio);\n          transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n  -webkit-transform: scaleY(@ratio);\n      -ms-transform: scaleY(@ratio); // IE9 only\n       -o-transform: scaleY(@ratio);\n  
         transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n  -webkit-transform: skewX(@x) skewY(@y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n       -o-transform: skewX(@x) skewY(@y);\n          transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n       -o-transform: translate(@x, @y);\n          transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n       -o-transform: rotate(@degrees);\n          transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n       -o-transform: rotateX(@degrees);\n          transform: rotateX
 (@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n       -o-transform: rotateY(@degrees);\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n  -webkit-transition: @transition;\n       -o-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n
           transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n  -webkit-transition-timing-function: @timing-function;\n          transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     -moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n          user-select: @select;\n}\n","// Gradi
 ents\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n    background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  // Vertical gradient, from top t
 o bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Opera 12\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-repeat: re
 peat-x;\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at al
 l for proper fallback\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255,255,255,.15); @angle: 45deg) {\n    
 background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n"]}
\ No newline at end of file




[16/50] [abbrv] hadoop git commit: HDFS-13573. Javadoc for BlockPlacementPolicyDefault is inaccurate. Contributed by Zsolt Venczel.

Posted by ar...@apache.org.
HDFS-13573. Javadoc for BlockPlacementPolicyDefault is inaccurate. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f749517c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f749517c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f749517c

Branch: refs/heads/HDDS-48
Commit: f749517cc78fc761cecff21e8b7f65fb719bfca2
Parents: 8783613
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri May 18 10:43:53 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri May 18 10:43:53 2018 +0800

----------------------------------------------------------------------
 .../blockmanagement/BlockPlacementPolicyDefault.java   | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f749517c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index a37cda4..518e62c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -41,10 +41,15 @@ import com.google.common.annotations.VisibleForTesting;
  * The class is responsible for choosing the desired number of targets
  * for placing block replicas.
  * The replica placement strategy is that if the writer is on a datanode,
- * the 1st replica is placed on the local machine, 
- * otherwise a random datanode. The 2nd replica is placed on a datanode
- * that is on a different rack. The 3rd replica is placed on a datanode
- * which is on a different node of the rack as the second replica.
+ * the 1st replica is placed on the local machine by default
+ * (by passing the {@link org.apache.hadoop.fs.CreateFlag#NO_LOCAL_WRITE} flag
+ * the client can request that no replica be placed on the local datanode;
+ * subsequent replicas still follow the default block placement policy).
+ * If the writer is not on a datanode, the 1st replica is placed on a random
+ * node.
+ * The 2nd replica is placed on a datanode that is on a different rack.
+ * The 3rd replica is placed on a different node of the same rack as the
+ * second replica.
  */
 @InterfaceAudience.Private
 public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
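
Since the updated javadoc points at org.apache.hadoop.fs.CreateFlag#NO_LOCAL_WRITE, here is a minimal client-side sketch of opting out of local placement; the path, buffer size, replication factor, and block size are illustrative values, not part of this commit:

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class NoLocalWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical demo path; any writable HDFS path works.
    Path file = new Path("/tmp/no-local-write-demo");
    // NO_LOCAL_WRITE only affects the 1st replica; the 2nd and 3rd replicas
    // are still chosen by the rack-aware rules described in the javadoc above.
    try (FSDataOutputStream out = fs.create(file,
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.NO_LOCAL_WRITE),
        4096,                 // buffer size
        (short) 3,            // replication factor
        128L * 1024 * 1024,   // block size
        null)) {              // no progress callback
      out.writeBytes("placement demo");
    }
  }
}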




[13/50] [abbrv] hadoop git commit: YARN-8141. Removed YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS flag. Contributed by Chandni Singh

Posted by ar...@apache.org.
YARN-8141. Removed YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS flag. Contributed by Chandni Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/989cfdc1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/989cfdc1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/989cfdc1

Branch: refs/heads/HDDS-48
Commit: 989cfdc1e0f1c27b35679c668a9bde6a0be845b5
Parents: 53b807a
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 17 20:56:04 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 17 20:56:04 2018 -0400

----------------------------------------------------------------------
 .../containerlaunch/TestAbstractLauncher.java   | 54 ++++++++++++++++++++
 1 file changed, 54 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/989cfdc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java
new file mode 100644
index 0000000..f4f1a50
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/containerlaunch/TestAbstractLauncher.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.containerlaunch;
+
+import org.apache.hadoop.yarn.service.ServiceContext;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.mockito.Mockito.mock;
+
+/**
+ * Tests for {@link AbstractLauncher}.
+ */
+public class TestAbstractLauncher {
+
+  private AbstractLauncher launcher;
+
+  @Before
+  public void setup() {
+    launcher = new AbstractLauncher(mock(ServiceContext.class));
+  }
+
+  @Test
+  public void testDockerContainerMounts() throws IOException {
+    launcher.yarnDockerMode = true;
+    launcher.envVars.put(AbstractLauncher.ENV_DOCKER_CONTAINER_MOUNTS,
+        "s1:t1:ro");
+    launcher.mountPaths.put("s2", "t2");
+    launcher.completeContainerLaunch();
+    String dockerContainerMounts = launcher.containerLaunchContext
+        .getEnvironment().get(AbstractLauncher.ENV_DOCKER_CONTAINER_MOUNTS);
+
+    Assert.assertEquals("s1:t1:ro,s2:t2:ro", dockerContainerMounts);
+  }
+}
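
The assertion implies that completeContainerLaunch() merges the pre-set
ENV_DOCKER_CONTAINER_MOUNTS value with the entries in mountPaths, appending
each one as a read-only source:target:ro triple. A rough sketch of that merge
follows; buildMounts() is a hypothetical stand-in, since the actual
AbstractLauncher implementation is not part of this diff:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.StringJoiner;

final class DockerMountsSketch {
  // Merges an existing mounts string with extra mounts, marking each extra
  // mount read-only, mirroring the "s1:t1:ro,s2:t2:ro" result in the test.
  static String buildMounts(String existing, Map<String, String> mountPaths) {
    StringJoiner joiner = new StringJoiner(",");
    if (existing != null && !existing.isEmpty()) {
      joiner.add(existing);                                // keeps "s1:t1:ro"
    }
    for (Map.Entry<String, String> e : mountPaths.entrySet()) {
      joiner.add(e.getKey() + ":" + e.getValue() + ":ro"); // adds "s2:t2:ro"
    }
    return joiner.toString();
  }

  public static void main(String[] args) {
    Map<String, String> mounts = new LinkedHashMap<>();
    mounts.put("s2", "t2");
    System.out.println(buildMounts("s1:t1:ro", mounts));   // s1:t1:ro,s2:t2:ro
  }
}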




[44/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
new file mode 100644
index 0000000..94fb549
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,288 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
+<font-face units-per-em="1200" ascent="960" descent="-240" />
+<missing-glyph horiz-adv-x="500" />
+<glyph horiz-adv-x="0" />
+<glyph horiz-adv-x="400" />
+<glyph unicode=" " />
+<glyph unicode="*" d="M600 1100q15 0 34 -1.5t30 -3.5l11 -1q10 -2 17.5 -10.5t7.5 -18.5v-224l158 158q7 7 18 8t19 -6l106 -106q7 -8 6 -19t-8 -18l-158 -158h224q10 0 18.5 -7.5t10.5 -17.5q6 -41 6 -75q0 -15 -1.5 -34t-3.5 -30l-1 -11q-2 -10 -10.5 -17.5t-18.5 -7.5h-224l158 -158 q7 -7 8 -18t-6 -19l-106 -106q-8 -7 -19 -6t-18 8l-158 158v-224q0 -10 -7.5 -18.5t-17.5 -10.5q-41 -6 -75 -6q-15 0 -34 1.5t-30 3.5l-11 1q-10 2 -17.5 10.5t-7.5 18.5v224l-158 -158q-7 -7 -18 -8t-19 6l-106 106q-7 8 -6 19t8 18l158 158h-224q-10 0 -18.5 7.5 t-10.5 17.5q-6 41 -6 75q0 15 1.5 34t3.5 30l1 11q2 10 10.5 17.5t18.5 7.5h224l-158 158q-7 7 -8 18t6 19l106 106q8 7 19 6t18 -8l158 -158v224q0 10 7.5 18.5t17.5 10.5q41 6 75 6z" />
+<glyph unicode="+" d="M450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-350h350q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-350v-350q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v350h-350q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5 h350v350q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xa0;" />
+<glyph unicode="&#xa5;" d="M825 1100h250q10 0 12.5 -5t-5.5 -13l-364 -364q-6 -6 -11 -18h268q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-100h275q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-125v-174q0 -11 -7.5 -18.5t-18.5 -7.5h-148q-11 0 -18.5 7.5t-7.5 18.5v174 h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h125v100h-275q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h118q-5 12 -11 18l-364 364q-8 8 -5.5 13t12.5 5h250q25 0 43 -18l164 -164q8 -8 18 -8t18 8l164 164q18 18 43 18z" />
+<glyph unicode="&#x2000;" horiz-adv-x="650" />
+<glyph unicode="&#x2001;" horiz-adv-x="1300" />
+<glyph unicode="&#x2002;" horiz-adv-x="650" />
+<glyph unicode="&#x2003;" horiz-adv-x="1300" />
+<glyph unicode="&#x2004;" horiz-adv-x="433" />
+<glyph unicode="&#x2005;" horiz-adv-x="325" />
+<glyph unicode="&#x2006;" horiz-adv-x="216" />
+<glyph unicode="&#x2007;" horiz-adv-x="216" />
+<glyph unicode="&#x2008;" horiz-adv-x="162" />
+<glyph unicode="&#x2009;" horiz-adv-x="260" />
+<glyph unicode="&#x200a;" horiz-adv-x="72" />
+<glyph unicode="&#x202f;" horiz-adv-x="260" />
+<glyph unicode="&#x205f;" horiz-adv-x="325" />
+<glyph unicode="&#x20ac;" d="M744 1198q242 0 354 -189q60 -104 66 -209h-181q0 45 -17.5 82.5t-43.5 61.5t-58 40.5t-60.5 24t-51.5 7.5q-19 0 -40.5 -5.5t-49.5 -20.5t-53 -38t-49 -62.5t-39 -89.5h379l-100 -100h-300q-6 -50 -6 -100h406l-100 -100h-300q9 -74 33 -132t52.5 -91t61.5 -54.5t59 -29 t47 -7.5q22 0 50.5 7.5t60.5 24.5t58 41t43.5 61t17.5 80h174q-30 -171 -128 -278q-107 -117 -274 -117q-206 0 -324 158q-36 48 -69 133t-45 204h-217l100 100h112q1 47 6 100h-218l100 100h134q20 87 51 153.5t62 103.5q117 141 297 141z" />
+<glyph unicode="&#x20bd;" d="M428 1200h350q67 0 120 -13t86 -31t57 -49.5t35 -56.5t17 -64.5t6.5 -60.5t0.5 -57v-16.5v-16.5q0 -36 -0.5 -57t-6.5 -61t-17 -65t-35 -57t-57 -50.5t-86 -31.5t-120 -13h-178l-2 -100h288q10 0 13 -6t-3 -14l-120 -160q-6 -8 -18 -14t-22 -6h-138v-175q0 -11 -5.5 -18 t-15.5 -7h-149q-10 0 -17.5 7.5t-7.5 17.5v175h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v100h-267q-10 0 -13 6t3 14l120 160q6 8 18 14t22 6h117v475q0 10 7.5 17.5t17.5 7.5zM600 1000v-300h203q64 0 86.5 33t22.5 119q0 84 -22.5 116t-86.5 32h-203z" />
+<glyph unicode="&#x2212;" d="M250 700h800q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#x231b;" d="M1000 1200v-150q0 -21 -14.5 -35.5t-35.5 -14.5h-50v-100q0 -91 -49.5 -165.5t-130.5 -109.5q81 -35 130.5 -109.5t49.5 -165.5v-150h50q21 0 35.5 -14.5t14.5 -35.5v-150h-800v150q0 21 14.5 35.5t35.5 14.5h50v150q0 91 49.5 165.5t130.5 109.5q-81 35 -130.5 109.5 t-49.5 165.5v100h-50q-21 0 -35.5 14.5t-14.5 35.5v150h800zM400 1000v-100q0 -60 32.5 -109.5t87.5 -73.5q28 -12 44 -37t16 -55t-16 -55t-44 -37q-55 -24 -87.5 -73.5t-32.5 -109.5v-150h400v150q0 60 -32.5 109.5t-87.5 73.5q-28 12 -44 37t-16 55t16 55t44 37 q55 24 87.5 73.5t32.5 109.5v100h-400z" />
+<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#x2601;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -206.5q0 -121 -85 -207.5t-205 -86.5h-750q-79 0 -135.5 57t-56.5 137q0 69 42.5 122.5t108.5 67.5q-2 12 -2 37q0 153 108 260.5t260 107.5z" />
+<glyph unicode="&#x26fa;" d="M774 1193.5q16 -9.5 20.5 -27t-5.5 -33.5l-136 -187l467 -746h30q20 0 35 -18.5t15 -39.5v-42h-1200v42q0 21 15 39.5t35 18.5h30l468 746l-135 183q-10 16 -5.5 34t20.5 28t34 5.5t28 -20.5l111 -148l112 150q9 16 27 20.5t34 -5zM600 200h377l-182 112l-195 534v-646z " />
+<glyph unicode="&#x2709;" d="M25 1100h1150q10 0 12.5 -5t-5.5 -13l-564 -567q-8 -8 -18 -8t-18 8l-564 567q-8 8 -5.5 13t12.5 5zM18 882l264 -264q8 -8 8 -18t-8 -18l-264 -264q-8 -8 -13 -5.5t-5 12.5v550q0 10 5 12.5t13 -5.5zM918 618l264 264q8 8 13 5.5t5 -12.5v-550q0 -10 -5 -12.5t-13 5.5 l-264 264q-8 8 -8 18t8 18zM818 482l364 -364q8 -8 5.5 -13t-12.5 -5h-1150q-10 0 -12.5 5t5.5 13l364 364q8 8 18 8t18 -8l164 -164q8 -8 18 -8t18 8l164 164q8 8 18 8t18 -8z" />
+<glyph unicode="&#x270f;" d="M1011 1210q19 0 33 -13l153 -153q13 -14 13 -33t-13 -33l-99 -92l-214 214l95 96q13 14 32 14zM1013 800l-615 -614l-214 214l614 614zM317 96l-333 -112l110 335z" />
+<glyph unicode="&#xe001;" d="M700 650v-550h250q21 0 35.5 -14.5t14.5 -35.5v-50h-800v50q0 21 14.5 35.5t35.5 14.5h250v550l-500 550h1200z" />
+<glyph unicode="&#xe002;" d="M368 1017l645 163q39 15 63 0t24 -49v-831q0 -55 -41.5 -95.5t-111.5 -63.5q-79 -25 -147 -4.5t-86 75t25.5 111.5t122.5 82q72 24 138 8v521l-600 -155v-606q0 -42 -44 -90t-109 -69q-79 -26 -147 -5.5t-86 75.5t25.5 111.5t122.5 82.5q72 24 138 7v639q0 38 14.5 59 t53.5 34z" />
+<glyph unicode="&#xe003;" d="M500 1191q100 0 191 -39t156.5 -104.5t104.5 -156.5t39 -191l-1 -2l1 -5q0 -141 -78 -262l275 -274q23 -26 22.5 -44.5t-22.5 -42.5l-59 -58q-26 -20 -46.5 -20t-39.5 20l-275 274q-119 -77 -261 -77l-5 1l-2 -1q-100 0 -191 39t-156.5 104.5t-104.5 156.5t-39 191 t39 191t104.5 156.5t156.5 104.5t191 39zM500 1022q-88 0 -162 -43t-117 -117t-43 -162t43 -162t117 -117t162 -43t162 43t117 117t43 162t-43 162t-117 117t-162 43z" />
+<glyph unicode="&#xe005;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104z" />
+<glyph unicode="&#xe006;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429z" />
+<glyph unicode="&#xe007;" d="M407 800l131 353q7 19 17.5 19t17.5 -19l129 -353h421q21 0 24 -8.5t-14 -20.5l-342 -249l130 -401q7 -20 -0.5 -25.5t-24.5 6.5l-343 246l-342 -247q-17 -12 -24.5 -6.5t-0.5 25.5l130 400l-347 251q-17 12 -14 20.5t23 8.5h429zM477 700h-240l197 -142l-74 -226 l193 139l195 -140l-74 229l192 140h-234l-78 211z" />
+<glyph unicode="&#xe008;" d="M600 1200q124 0 212 -88t88 -212v-250q0 -46 -31 -98t-69 -52v-75q0 -10 6 -21.5t15 -17.5l358 -230q9 -5 15 -16.5t6 -21.5v-93q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v93q0 10 6 21.5t15 16.5l358 230q9 6 15 17.5t6 21.5v75q-38 0 -69 52 t-31 98v250q0 124 88 212t212 88z" />
+<glyph unicode="&#xe009;" d="M25 1100h1150q10 0 17.5 -7.5t7.5 -17.5v-1050q0 -10 -7.5 -17.5t-17.5 -7.5h-1150q-10 0 -17.5 7.5t-7.5 17.5v1050q0 10 7.5 17.5t17.5 7.5zM100 1000v-100h100v100h-100zM875 1000h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5t17.5 -7.5h550 q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM1000 1000v-100h100v100h-100zM100 800v-100h100v100h-100zM1000 800v-100h100v100h-100zM100 600v-100h100v100h-100zM1000 600v-100h100v100h-100zM875 500h-550q-10 0 -17.5 -7.5t-7.5 -17.5v-350q0 -10 7.5 -17.5 t17.5 -7.5h550q10 0 17.5 7.5t7.5 17.5v350q0 10 -7.5 17.5t-17.5 7.5zM100 400v-100h100v100h-100zM1000 400v-100h100v100h-100zM100 200v-100h100v100h-100zM1000 200v-100h100v100h-100z" />
+<glyph unicode="&#xe010;" d="M50 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 1100h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400 q0 21 14.5 35.5t35.5 14.5zM50 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM650 500h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe011;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM850 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 700h200q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 3
 00h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM850 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5 t35.5 14.5z" />
+<glyph unicode="&#xe012;" d="M50 1100h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 1100h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200 q0 21 14.5 35.5t35.5 14.5zM50 700h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 700h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700 q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM50 300h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5zM450 300h700q21 0 35.5 -14.5t14.5 -35.5v-200 q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe013;" d="M465 477l571 571q8 8 18 8t17 -8l177 -177q8 -7 8 -17t-8 -18l-783 -784q-7 -8 -17.5 -8t-17.5 8l-384 384q-8 8 -8 18t8 17l177 177q7 8 17 8t18 -8l171 -171q7 -7 18 -7t18 7z" />
+<glyph unicode="&#xe014;" d="M904 1083l178 -179q8 -8 8 -18.5t-8 -17.5l-267 -268l267 -268q8 -7 8 -17.5t-8 -18.5l-178 -178q-8 -8 -18.5 -8t-17.5 8l-268 267l-268 -267q-7 -8 -17.5 -8t-18.5 8l-178 178q-8 8 -8 18.5t8 17.5l267 268l-267 268q-8 7 -8 17.5t8 18.5l178 178q8 8 18.5 8t17.5 -8 l268 -267l268 268q7 7 17.5 7t18.5 -7z" />
+<glyph unicode="&#xe015;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM425 900h150q10 0 17.5 -7.5t7.5 -17.5v-75h75q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5 t-17.5 -7.5h-75v-75q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v75h-75q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h75v75q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe016;" d="M507 1177q98 0 187.5 -38.5t154.5 -103.5t103.5 -154.5t38.5 -187.5q0 -141 -78 -262l300 -299q8 -8 8 -18.5t-8 -18.5l-109 -108q-7 -8 -17.5 -8t-18.5 8l-300 299q-119 -77 -261 -77q-98 0 -188 38.5t-154.5 103t-103 154.5t-38.5 188t38.5 187.5t103 154.5 t154.5 103.5t188 38.5zM506.5 1023q-89.5 0 -165.5 -44t-120 -120.5t-44 -166t44 -165.5t120 -120t165.5 -44t166 44t120.5 120t44 165.5t-44 166t-120.5 120.5t-166 44zM325 800h350q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-350q-10 0 -17.5 7.5 t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe017;" d="M550 1200h100q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v400q0 21 14.5 35.5t35.5 14.5zM800 975v166q167 -62 272 -209.5t105 -331.5q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5 t-184.5 123t-123 184.5t-45.5 224q0 184 105 331.5t272 209.5v-166q-103 -55 -165 -155t-62 -220q0 -116 57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5q0 120 -62 220t-165 155z" />
+<glyph unicode="&#xe018;" d="M1025 1200h150q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM725 800h150q10 0 17.5 -7.5t7.5 -17.5v-750q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v750 q0 10 7.5 17.5t17.5 7.5zM425 500h150q10 0 17.5 -7.5t7.5 -17.5v-450q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v450q0 10 7.5 17.5t17.5 7.5zM125 300h150q10 0 17.5 -7.5t7.5 -17.5v-250q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5 v250q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe019;" d="M600 1174q33 0 74 -5l38 -152l5 -1q49 -14 94 -39l5 -2l134 80q61 -48 104 -105l-80 -134l3 -5q25 -44 39 -93l1 -6l152 -38q5 -43 5 -73q0 -34 -5 -74l-152 -38l-1 -6q-15 -49 -39 -93l-3 -5l80 -134q-48 -61 -104 -105l-134 81l-5 -3q-44 -25 -94 -39l-5 -2l-38 -151 q-43 -5 -74 -5q-33 0 -74 5l-38 151l-5 2q-49 14 -94 39l-5 3l-134 -81q-60 48 -104 105l80 134l-3 5q-25 45 -38 93l-2 6l-151 38q-6 42 -6 74q0 33 6 73l151 38l2 6q13 48 38 93l3 5l-80 134q47 61 105 105l133 -80l5 2q45 25 94 39l5 1l38 152q43 5 74 5zM600 815 q-89 0 -152 -63t-63 -151.5t63 -151.5t152 -63t152 63t63 151.5t-63 151.5t-152 63z" />
+<glyph unicode="&#xe020;" d="M500 1300h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-75h-1100v75q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5zM500 1200v-100h300v100h-300zM1100 900v-800q0 -41 -29.5 -70.5t-70.5 -29.5h-700q-41 0 -70.5 29.5t-29.5 70.5 v800h900zM300 800v-700h100v700h-100zM500 800v-700h100v700h-100zM700 800v-700h100v700h-100zM900 800v-700h100v700h-100z" />
+<glyph unicode="&#xe021;" d="M18 618l620 608q8 7 18.5 7t17.5 -7l608 -608q8 -8 5.5 -13t-12.5 -5h-175v-575q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v375h-300v-375q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v575h-175q-10 0 -12.5 5t5.5 13z" />
+<glyph unicode="&#xe022;" d="M600 1200v-400q0 -41 29.5 -70.5t70.5 -29.5h300v-650q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v1100q0 21 14.5 35.5t35.5 14.5h450zM1000 800h-250q-21 0 -35.5 14.5t-14.5 35.5v250z" />
+<glyph unicode="&#xe023;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h50q10 0 17.5 -7.5t7.5 -17.5v-275h175q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe024;" d="M1300 0h-538l-41 400h-242l-41 -400h-538l431 1200h209l-21 -300h162l-20 300h208zM515 800l-27 -300h224l-27 300h-170z" />
+<glyph unicode="&#xe025;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-450h191q20 0 25.5 -11.5t-7.5 -27.5l-327 -400q-13 -16 -32 -16t-32 16l-327 400q-13 16 -7.5 27.5t25.5 11.5h191v450q0 21 14.5 35.5t35.5 14.5zM1125 400h50q10 0 17.5 -7.5t7.5 -17.5v-350q0 -10 -7.5 -17.5t-17.5 -7.5 h-1050q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h50q10 0 17.5 -7.5t7.5 -17.5v-175h900v175q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe026;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM525 900h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -275q-13 -16 -32 -16t-32 16l-223 275q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z " />
+<glyph unicode="&#xe027;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM632 914l223 -275q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5l223 275q13 16 32 16 t32 -16z" />
+<glyph unicode="&#xe028;" d="M225 1200h750q10 0 19.5 -7t12.5 -17l186 -652q7 -24 7 -49v-425q0 -12 -4 -27t-9 -17q-12 -6 -37 -6h-1100q-12 0 -27 4t-17 8q-6 13 -6 38l1 425q0 25 7 49l185 652q3 10 12.5 17t19.5 7zM878 1000h-556q-10 0 -19 -7t-11 -18l-87 -450q-2 -11 4 -18t16 -7h150 q10 0 19.5 -7t11.5 -17l38 -152q2 -10 11.5 -17t19.5 -7h250q10 0 19.5 7t11.5 17l38 152q2 10 11.5 17t19.5 7h150q10 0 16 7t4 18l-87 450q-2 11 -11 18t-19 7z" />
+<glyph unicode="&#xe029;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM540 820l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
+<glyph unicode="&#xe030;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-362q0 -10 -7.5 -17.5t-17.5 -7.5h-362q-11 0 -13 5.5t5 12.5l133 133q-109 76 -238 76q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5h150q0 -117 -45.5 -224 t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117z" />
+<glyph unicode="&#xe031;" d="M947 1060l135 135q7 7 12.5 5t5.5 -13v-361q0 -11 -7.5 -18.5t-18.5 -7.5h-361q-11 0 -13 5.5t5 12.5l134 134q-110 75 -239 75q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5h-150q0 117 45.5 224t123 184.5t184.5 123t224 45.5q192 0 347 -117zM1027 600h150 q0 -117 -45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5q-192 0 -348 118l-134 -134q-7 -8 -12.5 -5.5t-5.5 12.5v360q0 11 7.5 18.5t18.5 7.5h360q10 0 12.5 -5.5t-5.5 -12.5l-133 -133q110 -76 240 -76q116 0 214.5 57t155.5 155.5t57 214.5z" />
+<glyph unicode="&#xe032;" d="M125 1200h1050q10 0 17.5 -7.5t7.5 -17.5v-1150q0 -10 -7.5 -17.5t-17.5 -7.5h-1050q-10 0 -17.5 7.5t-7.5 17.5v1150q0 10 7.5 17.5t17.5 7.5zM1075 1000h-850q-10 0 -17.5 -7.5t-7.5 -17.5v-850q0 -10 7.5 -17.5t17.5 -7.5h850q10 0 17.5 7.5t7.5 17.5v850 q0 10 -7.5 17.5t-17.5 7.5zM325 900h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 900h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 700h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 700h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 500h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 500h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-
 450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5zM325 300h50q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5zM525 300h450q10 0 17.5 -7.5t7.5 -17.5v-50q0 -10 -7.5 -17.5t-17.5 -7.5h-450q-10 0 -17.5 7.5t-7.5 17.5v50 q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe033;" d="M900 800v200q0 83 -58.5 141.5t-141.5 58.5h-300q-82 0 -141 -59t-59 -141v-200h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h900q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-100zM400 800v150q0 21 15 35.5t35 14.5h200 q20 0 35 -14.5t15 -35.5v-150h-300z" />
+<glyph unicode="&#xe034;" d="M125 1100h50q10 0 17.5 -7.5t7.5 -17.5v-1075h-100v1075q0 10 7.5 17.5t17.5 7.5zM1075 1052q4 0 9 -2q16 -6 16 -23v-421q0 -6 -3 -12q-33 -59 -66.5 -99t-65.5 -58t-56.5 -24.5t-52.5 -6.5q-26 0 -57.5 6.5t-52.5 13.5t-60 21q-41 15 -63 22.5t-57.5 15t-65.5 7.5 q-85 0 -160 -57q-7 -5 -15 -5q-6 0 -11 3q-14 7 -14 22v438q22 55 82 98.5t119 46.5q23 2 43 0.5t43 -7t32.5 -8.5t38 -13t32.5 -11q41 -14 63.5 -21t57 -14t63.5 -7q103 0 183 87q7 8 18 8z" />
+<glyph unicode="&#xe035;" d="M600 1175q116 0 227 -49.5t192.5 -131t131 -192.5t49.5 -227v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v300q0 127 -70.5 231.5t-184.5 161.5t-245 57t-245 -57t-184.5 -161.5t-70.5 -231.5v-300q0 -10 -7.5 -17.5t-17.5 -7.5h-50 q-10 0 -17.5 7.5t-7.5 17.5v300q0 116 49.5 227t131 192.5t192.5 131t227 49.5zM220 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460q0 8 6 14t14 6zM820 500h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14v460 q0 8 6 14t14 6z" />
+<glyph unicode="&#xe036;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM900 668l120 120q7 7 17 7t17 -7l34 -34q7 -7 7 -17t-7 -17l-120 -120l120 -120q7 -7 7 -17 t-7 -17l-34 -34q-7 -7 -17 -7t-17 7l-120 119l-120 -119q-7 -7 -17 -7t-17 7l-34 34q-7 7 -7 17t7 17l119 120l-119 120q-7 7 -7 17t7 17l34 34q7 8 17 8t17 -8z" />
+<glyph unicode="&#xe037;" d="M321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6 l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238q-6 8 -4.5 18t9.5 17l29 22q7 5 15 5z" />
+<glyph unicode="&#xe038;" d="M967 1004h3q11 -1 17 -10q135 -179 135 -396q0 -105 -34 -206.5t-98 -185.5q-7 -9 -17 -10h-3q-9 0 -16 6l-42 34q-8 6 -9 16t5 18q111 150 111 328q0 90 -29.5 176t-84.5 157q-6 9 -5 19t10 16l42 33q7 5 15 5zM321 814l258 172q9 6 15 2.5t6 -13.5v-750q0 -10 -6 -13.5 t-15 2.5l-258 172q-21 14 -46 14h-250q-10 0 -17.5 7.5t-7.5 17.5v350q0 10 7.5 17.5t17.5 7.5h250q25 0 46 14zM766 900h4q10 -1 16 -10q96 -129 96 -290q0 -154 -90 -281q-6 -9 -17 -10l-3 -1q-9 0 -16 6l-29 23q-7 7 -8.5 16.5t4.5 17.5q72 103 72 229q0 132 -78 238 q-6 8 -4.5 18.5t9.5 16.5l29 22q7 5 15 5z" />
+<glyph unicode="&#xe039;" d="M500 900h100v-100h-100v-100h-400v-100h-100v600h500v-300zM1200 700h-200v-100h200v-200h-300v300h-200v300h-100v200h600v-500zM100 1100v-300h300v300h-300zM800 1100v-300h300v300h-300zM300 900h-100v100h100v-100zM1000 900h-100v100h100v-100zM300 500h200v-500 h-500v500h200v100h100v-100zM800 300h200v-100h-100v-100h-200v100h-100v100h100v200h-200v100h300v-300zM100 400v-300h300v300h-300zM300 200h-100v100h100v-100zM1200 200h-100v100h100v-100zM700 0h-100v100h100v-100zM1200 0h-300v100h300v-100z" />
+<glyph unicode="&#xe040;" d="M100 200h-100v1000h100v-1000zM300 200h-100v1000h100v-1000zM700 200h-200v1000h200v-1000zM900 200h-100v1000h100v-1000zM1200 200h-200v1000h200v-1000zM400 0h-300v100h300v-100zM600 0h-100v91h100v-91zM800 0h-100v91h100v-91zM1100 0h-200v91h200v-91z" />
+<glyph unicode="&#xe041;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
+<glyph unicode="&#xe042;" d="M500 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-682 682l1 475q0 10 7.5 17.5t17.5 7.5h474zM800 1200l682 -682q8 -8 8 -18t-8 -18l-464 -464q-8 -8 -18 -8t-18 8l-56 56l424 426l-700 700h150zM319.5 1024.5q-29.5 29.5 -71 29.5t-71 -29.5 t-29.5 -71.5t29.5 -71.5t71 -29.5t71 29.5t29.5 71.5t-29.5 71.5z" />
+<glyph unicode="&#xe043;" d="M300 1200h825q75 0 75 -75v-900q0 -25 -18 -43l-64 -64q-8 -8 -13 -5.5t-5 12.5v950q0 10 -7.5 17.5t-17.5 7.5h-700q-25 0 -43 -18l-64 -64q-8 -8 -5.5 -13t12.5 -5h700q10 0 17.5 -7.5t7.5 -17.5v-950q0 -10 -7.5 -17.5t-17.5 -7.5h-850q-10 0 -17.5 7.5t-7.5 17.5v975 q0 25 18 43l139 139q18 18 43 18z" />
+<glyph unicode="&#xe044;" d="M250 1200h800q21 0 35.5 -14.5t14.5 -35.5v-1150l-450 444l-450 -445v1151q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe045;" d="M822 1200h-444q-11 0 -19 -7.5t-9 -17.5l-78 -301q-7 -24 7 -45l57 -108q6 -9 17.5 -15t21.5 -6h450q10 0 21.5 6t17.5 15l62 108q14 21 7 45l-83 301q-1 10 -9 17.5t-19 7.5zM1175 800h-150q-10 0 -21 -6.5t-15 -15.5l-78 -156q-4 -9 -15 -15.5t-21 -6.5h-550 q-10 0 -21 6.5t-15 15.5l-78 156q-4 9 -15 15.5t-21 6.5h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-650q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h750q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5 t7.5 17.5v650q0 10 -7.5 17.5t-17.5 7.5zM850 200h-500q-10 0 -19.5 -7t-11.5 -17l-38 -152q-2 -10 3.5 -17t15.5 -7h600q10 0 15.5 7t3.5 17l-38 152q-2 10 -11.5 17t-19.5 7z" />
+<glyph unicode="&#xe046;" d="M500 1100h200q56 0 102.5 -20.5t72.5 -50t44 -59t25 -50.5l6 -20h150q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h150q2 8 6.5 21.5t24 48t45 61t72 48t102.5 21.5zM900 800v-100 h100v100h-100zM600 730q-95 0 -162.5 -67.5t-67.5 -162.5t67.5 -162.5t162.5 -67.5t162.5 67.5t67.5 162.5t-67.5 162.5t-162.5 67.5zM600 603q43 0 73 -30t30 -73t-30 -73t-73 -30t-73 30t-30 73t30 73t73 30z" />
+<glyph unicode="&#xe047;" d="M681 1199l385 -998q20 -50 60 -92q18 -19 36.5 -29.5t27.5 -11.5l10 -2v-66h-417v66q53 0 75 43.5t5 88.5l-82 222h-391q-58 -145 -92 -234q-11 -34 -6.5 -57t25.5 -37t46 -20t55 -6v-66h-365v66q56 24 84 52q12 12 25 30.5t20 31.5l7 13l399 1006h93zM416 521h340 l-162 457z" />
+<glyph unicode="&#xe048;" d="M753 641q5 -1 14.5 -4.5t36 -15.5t50.5 -26.5t53.5 -40t50.5 -54.5t35.5 -70t14.5 -87q0 -67 -27.5 -125.5t-71.5 -97.5t-98.5 -66.5t-108.5 -40.5t-102 -13h-500v89q41 7 70.5 32.5t29.5 65.5v827q0 24 -0.5 34t-3.5 24t-8.5 19.5t-17 13.5t-28 12.5t-42.5 11.5v71 l471 -1q57 0 115.5 -20.5t108 -57t80.5 -94t31 -124.5q0 -51 -15.5 -96.5t-38 -74.5t-45 -50.5t-38.5 -30.5zM400 700h139q78 0 130.5 48.5t52.5 122.5q0 41 -8.5 70.5t-29.5 55.5t-62.5 39.5t-103.5 13.5h-118v-350zM400 200h216q80 0 121 50.5t41 130.5q0 90 -62.5 154.5 t-156.5 64.5h-159v-400z" />
+<glyph unicode="&#xe049;" d="M877 1200l2 -57q-83 -19 -116 -45.5t-40 -66.5l-132 -839q-9 -49 13 -69t96 -26v-97h-500v97q186 16 200 98l173 832q3 17 3 30t-1.5 22.5t-9 17.5t-13.5 12.5t-21.5 10t-26 8.5t-33.5 10q-13 3 -19 5v57h425z" />
+<glyph unicode="&#xe050;" d="M1300 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-850q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v850h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM175 1000h-75v-800h75l-125 -167l-125 167h75v800h-75l125 167z" />
+<glyph unicode="&#xe051;" d="M1100 900h-50q0 21 -4 37t-9.5 26.5t-18 17.5t-22 11t-28.5 5.5t-31 2t-37 0.5h-200v-650q0 -22 25 -34.5t50 -13.5l25 -2v-100h-400v100q4 0 11 0.5t24 3t30 7t24 15t11 24.5v650h-200q-25 0 -37 -0.5t-31 -2t-28.5 -5.5t-22 -11t-18 -17.5t-9.5 -26.5t-4 -37h-50v300 h1000v-300zM1167 50l-167 -125v75h-800v-75l-167 125l167 125v-75h800v75z" />
+<glyph unicode="&#xe052;" d="M50 1100h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe053;" d="M250 1100h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM250 500h700q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe054;" d="M500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000 q-21 0 -35.5 14.5t-14.5 35.5zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5zM0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe055;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 800h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 500h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1100 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe056;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 1100h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 800h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 500h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 500h800q21 0 35.5 -14.5t14.5 -35.5v-100 q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM350 200
 h800 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe057;" d="M400 0h-100v1100h100v-1100zM550 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM267 550l-167 -125v75h-200v100h200v75zM550 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM550 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe058;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM900 0h-100v1100h100v-1100zM50 800h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM1100 600h200v-100h-200v-75l-167 125l167 125v-75zM50 500h300q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5zM50 200h600 q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-600q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe059;" d="M75 1000h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53v650q0 31 22 53t53 22zM1200 300l-300 300l300 300v-600z" />
+<glyph unicode="&#xe060;" d="M44 1100h1112q18 0 31 -13t13 -31v-1012q0 -18 -13 -31t-31 -13h-1112q-18 0 -31 13t-13 31v1012q0 18 13 31t31 13zM100 1000v-737l247 182l298 -131l-74 156l293 318l236 -288v500h-1000zM342 884q56 0 95 -39t39 -94.5t-39 -95t-95 -39.5t-95 39.5t-39 95t39 94.5 t95 39z" />
+<glyph unicode="&#xe062;" d="M648 1169q117 0 216 -60t156.5 -161t57.5 -218q0 -115 -70 -258q-69 -109 -158 -225.5t-143 -179.5l-54 -62q-9 8 -25.5 24.5t-63.5 67.5t-91 103t-98.5 128t-95.5 148q-60 132 -60 249q0 88 34 169.5t91.5 142t137 96.5t166.5 36zM652.5 974q-91.5 0 -156.5 -65 t-65 -157t65 -156.5t156.5 -64.5t156.5 64.5t65 156.5t-65 157t-156.5 65z" />
+<glyph unicode="&#xe063;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 173v854q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57z" />
+<glyph unicode="&#xe064;" d="M554 1295q21 -72 57.5 -143.5t76 -130t83 -118t82.5 -117t70 -116t49.5 -126t18.5 -136.5q0 -71 -25.5 -135t-68.5 -111t-99 -82t-118.5 -54t-125.5 -23q-84 5 -161.5 34t-139.5 78.5t-99 125t-37 164.5q0 69 18 136.5t49.5 126.5t69.5 116.5t81.5 117.5t83.5 119 t76.5 131t58.5 143zM344 710q-23 -33 -43.5 -70.5t-40.5 -102.5t-17 -123q1 -37 14.5 -69.5t30 -52t41 -37t38.5 -24.5t33 -15q21 -7 32 -1t13 22l6 34q2 10 -2.5 22t-13.5 19q-5 4 -14 12t-29.5 40.5t-32.5 73.5q-26 89 6 271q2 11 -6 11q-8 1 -15 -10z" />
+<glyph unicode="&#xe065;" d="M1000 1013l108 115q2 1 5 2t13 2t20.5 -1t25 -9.5t28.5 -21.5q22 -22 27 -43t0 -32l-6 -10l-108 -115zM350 1100h400q50 0 105 -13l-187 -187h-368q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v182l200 200v-332 q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM1009 803l-362 -362l-161 -50l55 170l355 355z" />
+<glyph unicode="&#xe066;" d="M350 1100h361q-164 -146 -216 -200h-195q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-103q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M824 1073l339 -301q8 -7 8 -17.5t-8 -17.5l-340 -306q-7 -6 -12.5 -4t-6.5 11v203q-26 1 -54.5 0t-78.5 -7.5t-92 -17.5t-86 -35t-70 -57q10 59 33 108t51.5 81.5t65 58.5t68.5 40.5t67 24.5t56 13.5t40 4.5v210q1 10 6.5 12.5t13.5 -4.5z" />
+<glyph unicode="&#xe067;" d="M350 1100h350q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-219q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5z M643 639l395 395q7 7 17.5 7t17.5 -7l101 -101q7 -7 7 -17.5t-7 -17.5l-531 -532q-7 -7 -17.5 -7t-17.5 7l-248 248q-7 7 -7 17.5t7 17.5l101 101q7 7 17.5 7t17.5 -7l111 -111q8 -7 18 -7t18 7z" />
+<glyph unicode="&#xe068;" d="M318 918l264 264q8 8 18 8t18 -8l260 -264q7 -8 4.5 -13t-12.5 -5h-170v-200h200v173q0 10 5 12t13 -5l264 -260q8 -7 8 -17.5t-8 -17.5l-264 -265q-8 -7 -13 -5t-5 12v173h-200v-200h170q10 0 12.5 -5t-4.5 -13l-260 -264q-8 -8 -18 -8t-18 8l-264 264q-8 8 -5.5 13 t12.5 5h175v200h-200v-173q0 -10 -5 -12t-13 5l-264 265q-8 7 -8 17.5t8 17.5l264 260q8 7 13 5t5 -12v-173h200v200h-175q-10 0 -12.5 5t5.5 13z" />
+<glyph unicode="&#xe069;" d="M250 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe070;" d="M50 1100h100q21 0 35.5 -14.5t14.5 -35.5v-438l464 453q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5 t-14.5 35.5v1000q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe071;" d="M1200 1050v-1000q0 -21 -10.5 -25t-25.5 10l-464 453v-438q0 -21 -10.5 -25t-25.5 10l-492 480q-15 14 -15 35t15 35l492 480q15 14 25.5 10t10.5 -25v-438l464 453q15 14 25.5 10t10.5 -25z" />
+<glyph unicode="&#xe072;" d="M243 1074l814 -498q18 -11 18 -26t-18 -26l-814 -498q-18 -11 -30.5 -4t-12.5 28v1000q0 21 12.5 28t30.5 -4z" />
+<glyph unicode="&#xe073;" d="M250 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5zM650 1000h200q21 0 35.5 -14.5t14.5 -35.5v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v800 q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe074;" d="M1100 950v-800q0 -21 -14.5 -35.5t-35.5 -14.5h-800q-21 0 -35.5 14.5t-14.5 35.5v800q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5z" />
+<glyph unicode="&#xe075;" d="M500 612v438q0 21 10.5 25t25.5 -10l492 -480q15 -14 15 -35t-15 -35l-492 -480q-15 -14 -25.5 -10t-10.5 25v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10z" />
+<glyph unicode="&#xe076;" d="M1048 1102l100 1q20 0 35 -14.5t15 -35.5l5 -1000q0 -21 -14.5 -35.5t-35.5 -14.5l-100 -1q-21 0 -35.5 14.5t-14.5 35.5l-2 437l-463 -454q-14 -15 -24.5 -10.5t-10.5 25.5l-2 437l-462 -455q-15 -14 -25.5 -9.5t-10.5 24.5l-5 1000q0 21 10.5 25.5t25.5 -10.5l466 -450 l-2 438q0 20 10.5 24.5t25.5 -9.5l466 -451l-2 438q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe077;" d="M850 1100h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-464 -453q-15 -14 -25.5 -10t-10.5 25v1000q0 21 10.5 25t25.5 -10l464 -453v438q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe078;" d="M686 1081l501 -540q15 -15 10.5 -26t-26.5 -11h-1042q-22 0 -26.5 11t10.5 26l501 540q15 15 36 15t36 -15zM150 400h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe079;" d="M885 900l-352 -353l352 -353l-197 -198l-552 552l552 550z" />
+<glyph unicode="&#xe080;" d="M1064 547l-551 -551l-198 198l353 353l-353 353l198 198z" />
+<glyph unicode="&#xe081;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM650 900h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-150 q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5h150v-150q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v150h150q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-150v150q0 21 -14.5 35.5t-35.5 14.5z" />
+<glyph unicode="&#xe082;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM850 700h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5 t35.5 -14.5h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5z" />
+<glyph unicode="&#xe083;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM741.5 913q-12.5 0 -21.5 -9l-120 -120l-120 120q-9 9 -21.5 9 t-21.5 -9l-141 -141q-9 -9 -9 -21.5t9 -21.5l120 -120l-120 -120q-9 -9 -9 -21.5t9 -21.5l141 -141q9 -9 21.5 -9t21.5 9l120 120l120 -120q9 -9 21.5 -9t21.5 9l141 141q9 9 9 21.5t-9 21.5l-120 120l120 120q9 9 9 21.5t-9 21.5l-141 141q-9 9 -21.5 9z" />
+<glyph unicode="&#xe084;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM546 623l-84 85q-7 7 -17.5 7t-18.5 -7l-139 -139q-7 -8 -7 -18t7 -18 l242 -241q7 -8 17.5 -8t17.5 8l375 375q7 7 7 17.5t-7 18.5l-139 139q-7 7 -17.5 7t-17.5 -7z" />
+<glyph unicode="&#xe085;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM588 941q-29 0 -59 -5.5t-63 -20.5t-58 -38.5t-41.5 -63t-16.5 -89.5 q0 -25 20 -25h131q30 -5 35 11q6 20 20.5 28t45.5 8q20 0 31.5 -10.5t11.5 -28.5q0 -23 -7 -34t-26 -18q-1 0 -13.5 -4t-19.5 -7.5t-20 -10.5t-22 -17t-18.5 -24t-15.5 -35t-8 -46q-1 -8 5.5 -16.5t20.5 -8.5h173q7 0 22 8t35 28t37.5 48t29.5 74t12 100q0 47 -17 83 t-42.5 57t-59.5 34.5t-64 18t-59 4.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe086;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM675 1000h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5 t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5zM675 700h-250q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h75v-200h-75q-10 0 -17.5 -7.5t-7.5 -17.5v-50q0 -10 7.5 -17.5t17.5 -7.5h350q10 0 17.5 7.5t7.5 17.5v50q0 10 -7.5 17.5 t-17.5 7.5h-75v275q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe087;" d="M525 1200h150q10 0 17.5 -7.5t7.5 -17.5v-194q103 -27 178.5 -102.5t102.5 -178.5h194q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-194q-27 -103 -102.5 -178.5t-178.5 -102.5v-194q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v194 q-103 27 -178.5 102.5t-102.5 178.5h-194q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h194q27 103 102.5 178.5t178.5 102.5v194q0 10 7.5 17.5t17.5 7.5zM700 893v-168q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v168q-68 -23 -119 -74 t-74 -119h168q10 0 17.5 -7.5t7.5 -17.5v-150q0 -10 -7.5 -17.5t-17.5 -7.5h-168q23 -68 74 -119t119 -74v168q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-168q68 23 119 74t74 119h-168q-10 0 -17.5 7.5t-7.5 17.5v150q0 10 7.5 17.5t17.5 7.5h168 q-23 68 -74 119t-119 74z" />
+<glyph unicode="&#xe088;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM759 823l64 -64q7 -7 7 -17.5t-7 -17.5l-124 -124l124 -124q7 -7 7 -17.5t-7 -17.5l-64 -64q-7 -7 -17.5 -7t-17.5 7l-124 124l-124 -124q-7 -7 -17.5 -7t-17.5 7l-64 64 q-7 7 -7 17.5t7 17.5l124 124l-124 124q-7 7 -7 17.5t7 17.5l64 64q7 7 17.5 7t17.5 -7l124 -124l124 124q7 7 17.5 7t17.5 -7z" />
+<glyph unicode="&#xe089;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5t57 -214.5 t155.5 -155.5t214.5 -57t214.5 57t155.5 155.5t57 214.5t-57 214.5t-155.5 155.5t-214.5 57zM782 788l106 -106q7 -7 7 -17.5t-7 -17.5l-320 -321q-8 -7 -18 -7t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l197 197q7 7 17.5 7t17.5 -7z" />
+<glyph unicode="&#xe090;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM600 1027q-116 0 -214.5 -57t-155.5 -155.5t-57 -214.5q0 -120 65 -225 l587 587q-105 65 -225 65zM965 819l-584 -584q104 -62 219 -62q116 0 214.5 57t155.5 155.5t57 214.5q0 115 -62 219z" />
+<glyph unicode="&#xe091;" d="M39 582l522 427q16 13 27.5 8t11.5 -26v-291h550q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-550v-291q0 -21 -11.5 -26t-27.5 8l-522 427q-16 13 -16 32t16 32z" />
+<glyph unicode="&#xe092;" d="M639 1009l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291h-550q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h550v291q0 21 11.5 26t27.5 -8z" />
+<glyph unicode="&#xe093;" d="M682 1161l427 -522q13 -16 8 -27.5t-26 -11.5h-291v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v550h-291q-21 0 -26 11.5t8 27.5l427 522q13 16 32 16t32 -16z" />
+<glyph unicode="&#xe094;" d="M550 1200h200q21 0 35.5 -14.5t14.5 -35.5v-550h291q21 0 26 -11.5t-8 -27.5l-427 -522q-13 -16 -32 -16t-32 16l-427 522q-13 16 -8 27.5t26 11.5h291v550q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe095;" d="M639 1109l522 -427q16 -13 16 -32t-16 -32l-522 -427q-16 -13 -27.5 -8t-11.5 26v291q-94 -2 -182 -20t-170.5 -52t-147 -92.5t-100.5 -135.5q5 105 27 193.5t67.5 167t113 135t167 91.5t225.5 42v262q0 21 11.5 26t27.5 -8z" />
+<glyph unicode="&#xe096;" d="M850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5zM350 0h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249 q8 7 18 7t18 -7l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5z" />
+<glyph unicode="&#xe097;" d="M1014 1120l106 -106q7 -8 7 -18t-7 -18l-249 -249l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l249 249q8 7 18 7t18 -7zM250 600h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-249 -249q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l249 249l-94 94q-14 14 -10 24.5t25 10.5z" />
+<glyph unicode="&#xe101;" d="M600 1177q117 0 224 -45.5t184.5 -123t123 -184.5t45.5 -224t-45.5 -224t-123 -184.5t-184.5 -123t-224 -45.5t-224 45.5t-184.5 123t-123 184.5t-45.5 224t45.5 224t123 184.5t184.5 123t224 45.5zM704 900h-208q-20 0 -32 -14.5t-8 -34.5l58 -302q4 -20 21.5 -34.5 t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5l58 302q4 20 -8 34.5t-32 14.5zM675 400h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe102;" d="M260 1200q9 0 19 -2t15 -4l5 -2q22 -10 44 -23l196 -118q21 -13 36 -24q29 -21 37 -12q11 13 49 35l196 118q22 13 45 23q17 7 38 7q23 0 47 -16.5t37 -33.5l13 -16q14 -21 18 -45l25 -123l8 -44q1 -9 8.5 -14.5t17.5 -5.5h61q10 0 17.5 -7.5t7.5 -17.5v-50 q0 -10 -7.5 -17.5t-17.5 -7.5h-50q-10 0 -17.5 -7.5t-7.5 -17.5v-175h-400v300h-200v-300h-400v175q0 10 -7.5 17.5t-17.5 7.5h-50q-10 0 -17.5 7.5t-7.5 17.5v50q0 10 7.5 17.5t17.5 7.5h61q11 0 18 3t7 8q0 4 9 52l25 128q5 25 19 45q2 3 5 7t13.5 15t21.5 19.5t26.5 15.5 t29.5 7zM915 1079l-166 -162q-7 -7 -5 -12t12 -5h219q10 0 15 7t2 17l-51 149q-3 10 -11 12t-15 -6zM463 917l-177 157q-8 7 -16 5t-11 -12l-51 -143q-3 -10 2 -17t15 -7h231q11 0 12.5 5t-5.5 12zM500 0h-375q-10 0 -17.5 7.5t-7.5 17.5v375h400v-400zM1100 400v-375 q0 -10 -7.5 -17.5t-17.5 -7.5h-375v400h400z" />
+<glyph unicode="&#xe103;" d="M1165 1190q8 3 21 -6.5t13 -17.5q-2 -178 -24.5 -323.5t-55.5 -245.5t-87 -174.5t-102.5 -118.5t-118 -68.5t-118.5 -33t-120 -4.5t-105 9.5t-90 16.5q-61 12 -78 11q-4 1 -12.5 0t-34 -14.5t-52.5 -40.5l-153 -153q-26 -24 -37 -14.5t-11 43.5q0 64 42 102q8 8 50.5 45 t66.5 58q19 17 35 47t13 61q-9 55 -10 102.5t7 111t37 130t78 129.5q39 51 80 88t89.5 63.5t94.5 45t113.5 36t129 31t157.5 37t182 47.5zM1116 1098q-8 9 -22.5 -3t-45.5 -50q-38 -47 -119 -103.5t-142 -89.5l-62 -33q-56 -30 -102 -57t-104 -68t-102.5 -80.5t-85.5 -91 t-64 -104.5q-24 -56 -31 -86t2 -32t31.5 17.5t55.5 59.5q25 30 94 75.5t125.5 77.5t147.5 81q70 37 118.5 69t102 79.5t99 111t86.5 148.5q22 50 24 60t-6 19z" />
+<glyph unicode="&#xe104;" d="M653 1231q-39 -67 -54.5 -131t-10.5 -114.5t24.5 -96.5t47.5 -80t63.5 -62.5t68.5 -46.5t65 -30q-4 7 -17.5 35t-18.5 39.5t-17 39.5t-17 43t-13 42t-9.5 44.5t-2 42t4 43t13.5 39t23 38.5q96 -42 165 -107.5t105 -138t52 -156t13 -159t-19 -149.5q-13 -55 -44 -106.5 t-68 -87t-78.5 -64.5t-72.5 -45t-53 -22q-72 -22 -127 -11q-31 6 -13 19q6 3 17 7q13 5 32.5 21t41 44t38.5 63.5t21.5 81.5t-6.5 94.5t-50 107t-104 115.5q10 -104 -0.5 -189t-37 -140.5t-65 -93t-84 -52t-93.5 -11t-95 24.5q-80 36 -131.5 114t-53.5 171q-2 23 0 49.5 t4.5 52.5t13.5 56t27.5 60t46 64.5t69.5 68.5q-8 -53 -5 -102.5t17.5 -90t34 -68.5t44.5 -39t49 -2q31 13 38.5 36t-4.5 55t-29 64.5t-36 75t-26 75.5q-15 85 2 161.5t53.5 128.5t85.5 92.5t93.5 61t81.5 25.5z" />
+<glyph unicode="&#xe105;" d="M600 1094q82 0 160.5 -22.5t140 -59t116.5 -82.5t94.5 -95t68 -95t42.5 -82.5t14 -57.5t-14 -57.5t-43 -82.5t-68.5 -95t-94.5 -95t-116.5 -82.5t-140 -59t-159.5 -22.5t-159.5 22.5t-140 59t-116.5 82.5t-94.5 95t-68.5 95t-43 82.5t-14 57.5t14 57.5t42.5 82.5t68 95 t94.5 95t116.5 82.5t140 59t160.5 22.5zM888 829q-15 15 -18 12t5 -22q25 -57 25 -119q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 59 23 114q8 19 4.5 22t-17.5 -12q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q22 -36 47 -71t70 -82t92.5 -81t113 -58.5t133.5 -24.5 t133.5 24t113 58.5t92.5 81.5t70 81.5t47 70.5q11 18 9 42.5t-14 41.5q-90 117 -163 189zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l35 34q14 15 12.5 33.5t-16.5 33.5q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
+<glyph unicode="&#xe106;" d="M592 0h-148l31 120q-91 20 -175.5 68.5t-143.5 106.5t-103.5 119t-66.5 110t-22 76q0 21 14 57.5t42.5 82.5t68 95t94.5 95t116.5 82.5t140 59t160.5 22.5q61 0 126 -15l32 121h148zM944 770l47 181q108 -85 176.5 -192t68.5 -159q0 -26 -19.5 -71t-59.5 -102t-93 -112 t-129 -104.5t-158 -75.5l46 173q77 49 136 117t97 131q11 18 9 42.5t-14 41.5q-54 70 -107 130zM310 824q-70 -69 -160 -184q-13 -16 -15 -40.5t9 -42.5q18 -30 39 -60t57 -70.5t74 -73t90 -61t105 -41.5l41 154q-107 18 -178.5 101.5t-71.5 193.5q0 59 23 114q8 19 4.5 22 t-17.5 -12zM448 727l-35 -36q-15 -15 -19.5 -38.5t4.5 -41.5q37 -68 93 -116q16 -13 38.5 -11t36.5 17l12 11l22 86l-3 4q-44 44 -89 117q-11 18 -28 20t-32 -12z" />
+<glyph unicode="&#xe107;" d="M-90 100l642 1066q20 31 48 28.5t48 -35.5l642 -1056q21 -32 7.5 -67.5t-50.5 -35.5h-1294q-37 0 -50.5 34t7.5 66zM155 200h345v75q0 10 7.5 17.5t17.5 7.5h150q10 0 17.5 -7.5t7.5 -17.5v-75h345l-445 723zM496 700h208q20 0 32 -14.5t8 -34.5l-58 -252 q-4 -20 -21.5 -34.5t-37.5 -14.5h-54q-20 0 -37.5 14.5t-21.5 34.5l-58 252q-4 20 8 34.5t32 14.5z" />
+<glyph unicode="&#xe108;" d="M650 1200q62 0 106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -93 100 -113v-64q0 -21 -13 -29t-32 1l-205 128l-205 -128q-19 -9 -32 -1t-13 29v64q0 20 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41 q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44z" />
+<glyph unicode="&#xe109;" d="M850 1200h100q21 0 35.5 -14.5t14.5 -35.5v-50h50q21 0 35.5 -14.5t14.5 -35.5v-150h-1100v150q0 21 14.5 35.5t35.5 14.5h50v50q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-50h500v50q0 21 14.5 35.5t35.5 14.5zM1100 800v-750q0 -21 -14.5 -35.5 t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5v750h1100zM100 600v-100h100v100h-100zM300 600v-100h100v100h-100zM500 600v-100h100v100h-100zM700 600v-100h100v100h-100zM900 600v-100h100v100h-100zM100 400v-100h100v100h-100zM300 400v-100h100v100h-100zM500 400 v-100h100v100h-100zM700 400v-100h100v100h-100zM900 400v-100h100v100h-100zM100 200v-100h100v100h-100zM300 200v-100h100v100h-100zM500 200v-100h100v100h-100zM700 200v-100h100v100h-100zM900 200v-100h100v100h-100z" />
+<glyph unicode="&#xe110;" d="M1135 1165l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-159l-600 -600h-291q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h209l600 600h241v150q0 21 10.5 25t24.5 -10zM522 819l-141 -141l-122 122h-209q-21 0 -35.5 14.5 t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h291zM1135 565l249 -230q15 -14 15 -35t-15 -35l-249 -230q-14 -14 -24.5 -10t-10.5 25v150h-241l-181 181l141 141l122 -122h159v150q0 21 10.5 25t24.5 -10z" />
+<glyph unicode="&#xe111;" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
+<glyph unicode="&#xe112;" d="M150 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM850 1200h200q21 0 35.5 -14.5t14.5 -35.5v-250h-300v250q0 21 14.5 35.5t35.5 14.5zM1100 800v-300q0 -41 -3 -77.5t-15 -89.5t-32 -96t-58 -89t-89 -77t-129 -51t-174 -20t-174 20 t-129 51t-89 77t-58 89t-32 96t-15 89.5t-3 77.5v300h300v-250v-27v-42.5t1.5 -41t5 -38t10 -35t16.5 -30t25.5 -24.5t35 -19t46.5 -12t60 -4t60 4.5t46.5 12.5t35 19.5t25 25.5t17 30.5t10 35t5 38t2 40.5t-0.5 42v25v250h300z" />
+<glyph unicode="&#xe113;" d="M1100 411l-198 -199l-353 353l-353 -353l-197 199l551 551z" />
+<glyph unicode="&#xe114;" d="M1101 789l-550 -551l-551 551l198 199l353 -353l353 353z" />
+<glyph unicode="&#xe115;" d="M404 1000h746q21 0 35.5 -14.5t14.5 -35.5v-551h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v401h-381zM135 984l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-400h385l215 -200h-750q-21 0 -35.5 14.5 t-14.5 35.5v550h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe116;" d="M56 1200h94q17 0 31 -11t18 -27l38 -162h896q24 0 39 -18.5t10 -42.5l-100 -475q-5 -21 -27 -42.5t-55 -21.5h-633l48 -200h535q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-50v-50q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-300v-50 q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v50h-31q-18 0 -32.5 10t-20.5 19l-5 10l-201 961h-54q-20 0 -35 14.5t-15 35.5t15 35.5t35 14.5z" />
+<glyph unicode="&#xe117;" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
+<glyph unicode="&#xe118;" d="M200 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q42 0 71 -29.5t29 -70.5h500v-200h-1000zM1500 700l-300 -700h-1200l300 700h1200z" />
+<glyph unicode="&#xe119;" d="M635 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-601h150q21 0 25 -10.5t-10 -24.5l-230 -249q-14 -15 -35 -15t-35 15l-230 249q-14 14 -10 24.5t25 10.5h150v601h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe120;" d="M936 864l249 -229q14 -15 14 -35.5t-14 -35.5l-249 -229q-15 -15 -25.5 -10.5t-10.5 24.5v151h-600v-151q0 -20 -10.5 -24.5t-25.5 10.5l-249 229q-14 15 -14 35.5t14 35.5l249 229q15 15 25.5 10.5t10.5 -25.5v-149h600v149q0 21 10.5 25.5t25.5 -10.5z" />
+<glyph unicode="&#xe121;" d="M1169 400l-172 732q-5 23 -23 45.5t-38 22.5h-672q-20 0 -38 -20t-23 -41l-172 -739h1138zM1100 300h-1000q-41 0 -70.5 -29.5t-29.5 -70.5v-100q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v100q0 41 -29.5 70.5t-70.5 29.5zM800 100v100h100v-100h-100 zM1000 100v100h100v-100h-100z" />
+<glyph unicode="&#xe122;" d="M1150 1100q21 0 35.5 -14.5t14.5 -35.5v-850q0 -21 -14.5 -35.5t-35.5 -14.5t-35.5 14.5t-14.5 35.5v850q0 21 14.5 35.5t35.5 14.5zM1000 200l-675 200h-38l47 -276q3 -16 -5.5 -20t-29.5 -4h-7h-84q-20 0 -34.5 14t-18.5 35q-55 337 -55 351v250v6q0 16 1 23.5t6.5 14 t17.5 6.5h200l675 250v-850zM0 750v-250q-4 0 -11 0.5t-24 6t-30 15t-24 30t-11 48.5v50q0 26 10.5 46t25 30t29 16t25.5 7z" />
+<glyph unicode="&#xe123;" d="M553 1200h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q19 0 33 -14.5t14 -35t-13 -40.5t-31 -27q-8 -4 -23 -9.5t-65 -19.5t-103 -25t-132.5 -20t-158.5 -9q-57 0 -115 5t-104 12t-88.5 15.5t-73.5 17.5t-54.5 16t-35.5 12l-11 4 q-18 8 -31 28t-13 40.5t14 35t33 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3.5 32t28.5 13zM498 110q50 -6 102 -6q53 0 102 6q-12 -49 -39.5 -79.5t-62.5 -30.5t-63 30.5t-39 79.5z" />
+<glyph unicode="&#xe124;" d="M800 946l224 78l-78 -224l234 -45l-180 -155l180 -155l-234 -45l78 -224l-224 78l-45 -234l-155 180l-155 -180l-45 234l-224 -78l78 224l-234 45l180 155l-180 155l234 45l-78 224l224 -78l45 234l155 -180l155 180z" />
+<glyph unicode="&#xe125;" d="M650 1200h50q40 0 70 -40.5t30 -84.5v-150l-28 -125h328q40 0 70 -40.5t30 -84.5v-100q0 -45 -29 -74l-238 -344q-16 -24 -38 -40.5t-45 -16.5h-250q-7 0 -42 25t-66 50l-31 25h-61q-45 0 -72.5 18t-27.5 57v400q0 36 20 63l145 196l96 198q13 28 37.5 48t51.5 20z M650 1100l-100 -212l-150 -213v-375h100l136 -100h214l250 375v125h-450l50 225v175h-50zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe126;" d="M600 1100h250q23 0 45 -16.5t38 -40.5l238 -344q29 -29 29 -74v-100q0 -44 -30 -84.5t-70 -40.5h-328q28 -118 28 -125v-150q0 -44 -30 -84.5t-70 -40.5h-50q-27 0 -51.5 20t-37.5 48l-96 198l-145 196q-20 27 -20 63v400q0 39 27.5 57t72.5 18h61q124 100 139 100z M50 1000h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500q0 21 14.5 35.5t35.5 14.5zM636 1000l-136 -100h-100v-375l150 -213l100 -212h50v175l-50 225h450v125l-250 375h-214z" />
+<glyph unicode="&#xe127;" d="M356 873l363 230q31 16 53 -6l110 -112q13 -13 13.5 -32t-11.5 -34l-84 -121h302q84 0 138 -38t54 -110t-55 -111t-139 -39h-106l-131 -339q-6 -21 -19.5 -41t-28.5 -20h-342q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM400 792v-503l100 -89h293l131 339 q6 21 19.5 41t28.5 20h203q21 0 30.5 25t0.5 50t-31 25h-456h-7h-6h-5.5t-6 0.5t-5 1.5t-5 2t-4 2.5t-4 4t-2.5 4.5q-12 25 5 47l146 183l-86 83zM50 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v500 q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe128;" d="M475 1103l366 -230q2 -1 6 -3.5t14 -10.5t18 -16.5t14.5 -20t6.5 -22.5v-525q0 -13 -86 -94t-93 -81h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-85 0 -139.5 39t-54.5 111t54 110t138 38h302l-85 121q-11 15 -10.5 34t13.5 32l110 112q22 22 53 6zM370 945l146 -183 q17 -22 5 -47q-2 -2 -3.5 -4.5t-4 -4t-4 -2.5t-5 -2t-5 -1.5t-6 -0.5h-6h-6.5h-6h-475v-100h221q15 0 29 -20t20 -41l130 -339h294l106 89v503l-342 236zM1050 800h100q21 0 35.5 -14.5t14.5 -35.5v-500q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5 v500q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe129;" d="M550 1294q72 0 111 -55t39 -139v-106l339 -131q21 -6 41 -19.5t20 -28.5v-342q0 -7 -81 -90t-94 -83h-525q-17 0 -35.5 14t-28.5 28l-9 14l-230 363q-16 31 6 53l112 110q13 13 32 13.5t34 -11.5l121 -84v302q0 84 38 138t110 54zM600 972v203q0 21 -25 30.5t-50 0.5 t-25 -31v-456v-7v-6v-5.5t-0.5 -6t-1.5 -5t-2 -5t-2.5 -4t-4 -4t-4.5 -2.5q-25 -12 -47 5l-183 146l-83 -86l236 -339h503l89 100v293l-339 131q-21 6 -41 19.5t-20 28.5zM450 200h500q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-500 q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe130;" d="M350 1100h500q21 0 35.5 14.5t14.5 35.5v100q0 21 -14.5 35.5t-35.5 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100q0 -21 14.5 -35.5t35.5 -14.5zM600 306v-106q0 -84 -39 -139t-111 -55t-110 54t-38 138v302l-121 -84q-15 -12 -34 -11.5t-32 13.5l-112 110 q-22 22 -6 53l230 363q1 2 3.5 6t10.5 13.5t16.5 17t20 13.5t22.5 6h525q13 0 94 -83t81 -90v-342q0 -15 -20 -28.5t-41 -19.5zM308 900l-236 -339l83 -86l183 146q22 17 47 5q2 -1 4.5 -2.5t4 -4t2.5 -4t2 -5t1.5 -5t0.5 -6v-5.5v-6v-7v-456q0 -22 25 -31t50 0.5t25 30.5 v203q0 15 20 28.5t41 19.5l339 131v293l-89 100h-503z" />
+<glyph unicode="&#xe131;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM914 632l-275 223q-16 13 -27.5 8t-11.5 -26v-137h-275 q-10 0 -17.5 -7.5t-7.5 -17.5v-150q0 -10 7.5 -17.5t17.5 -7.5h275v-137q0 -21 11.5 -26t27.5 8l275 223q16 13 16 32t-16 32z" />
+<glyph unicode="&#xe132;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM561 855l-275 -223q-16 -13 -16 -32t16 -32l275 -223q16 -13 27.5 -8 t11.5 26v137h275q10 0 17.5 7.5t7.5 17.5v150q0 10 -7.5 17.5t-17.5 7.5h-275v137q0 21 -11.5 26t-27.5 -8z" />
+<glyph unicode="&#xe133;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM855 639l-223 275q-13 16 -32 16t-32 -16l-223 -275q-13 -16 -8 -27.5 t26 -11.5h137v-275q0 -10 7.5 -17.5t17.5 -7.5h150q10 0 17.5 7.5t7.5 17.5v275h137q21 0 26 11.5t-8 27.5z" />
+<glyph unicode="&#xe134;" d="M600 1178q118 0 225 -45.5t184.5 -123t123 -184.5t45.5 -225t-45.5 -225t-123 -184.5t-184.5 -123t-225 -45.5t-225 45.5t-184.5 123t-123 184.5t-45.5 225t45.5 225t123 184.5t184.5 123t225 45.5zM675 900h-150q-10 0 -17.5 -7.5t-7.5 -17.5v-275h-137q-21 0 -26 -11.5 t8 -27.5l223 -275q13 -16 32 -16t32 16l223 275q13 16 8 27.5t-26 11.5h-137v275q0 10 -7.5 17.5t-17.5 7.5z" />
+<glyph unicode="&#xe135;" d="M600 1176q116 0 222.5 -46t184 -123.5t123.5 -184t46 -222.5t-46 -222.5t-123.5 -184t-184 -123.5t-222.5 -46t-222.5 46t-184 123.5t-123.5 184t-46 222.5t46 222.5t123.5 184t184 123.5t222.5 46zM627 1101q-15 -12 -36.5 -20.5t-35.5 -12t-43 -8t-39 -6.5 q-15 -3 -45.5 0t-45.5 -2q-20 -7 -51.5 -26.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79q-9 -34 5 -93t8 -87q0 -9 17 -44.5t16 -59.5q12 0 23 -5t23.5 -15t19.5 -14q16 -8 33 -15t40.5 -15t34.5 -12q21 -9 52.5 -32t60 -38t57.5 -11 q7 -15 -3 -34t-22.5 -40t-9.5 -38q13 -21 23 -34.5t27.5 -27.5t36.5 -18q0 -7 -3.5 -16t-3.5 -14t5 -17q104 -2 221 112q30 29 46.5 47t34.5 49t21 63q-13 8 -37 8.5t-36 7.5q-15 7 -49.5 15t-51.5 19q-18 0 -41 -0.5t-43 -1.5t-42 -6.5t-38 -16.5q-51 -35 -66 -12 q-4 1 -3.5 25.5t0.5 25.5q-6 13 -26.5 17.5t-24.5 6.5q1 15 -0.5 30.5t-7 28t-18.5 11.5t-31 -21q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q7 -12 18 -24t21.5 -20.5t20 -15t15.5 -10.5l5 -3q2 12 7.5 30.5t8 34.5t-0.5 32q-
 3 18 3.5 29 t18 22.5t15.5 24.5q6 14 10.5 35t8 31t15.5 22.5t34 22.5q-6 18 10 36q8 0 24 -1.5t24.5 -1.5t20 4.5t20.5 15.5q-10 23 -31 42.5t-37.5 29.5t-49 27t-43.5 23q0 1 2 8t3 11.5t1.5 10.5t-1 9.5t-4.5 4.5q31 -13 58.5 -14.5t38.5 2.5l12 5q5 28 -9.5 46t-36.5 24t-50 15 t-41 20q-18 -4 -37 0zM613 994q0 -17 8 -42t17 -45t9 -23q-8 1 -39.5 5.5t-52.5 10t-37 16.5q3 11 16 29.5t16 25.5q10 -10 19 -10t14 6t13.5 14.5t16.5 12.5z" />
+<glyph unicode="&#xe136;" d="M756 1157q164 92 306 -9l-259 -138l145 -232l251 126q6 -89 -34 -156.5t-117 -110.5q-60 -34 -127 -39.5t-126 16.5l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-34 101 5.5 201.5t135.5 154.5z" />
+<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
+<glyph unicode="&#xe138;" d="M150 1200h900q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-900q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM700 500v-300l-200 -200v500l-350 500h900z" />
+<glyph unicode="&#xe139;" d="M500 1200h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5zM500 1100v-100h200v100h-200zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
+<glyph unicode="&#xe140;" d="M50 1200h300q21 0 25 -10.5t-10 -24.5l-94 -94l199 -199q7 -8 7 -18t-7 -18l-106 -106q-8 -7 -18 -7t-18 7l-199 199l-94 -94q-14 -14 -24.5 -10t-10.5 25v300q0 21 14.5 35.5t35.5 14.5zM850 1200h300q21 0 35.5 -14.5t14.5 -35.5v-300q0 -21 -10.5 -25t-24.5 10l-94 94 l-199 -199q-8 -7 -18 -7t-18 7l-106 106q-7 8 -7 18t7 18l199 199l-94 94q-14 14 -10 24.5t25 10.5zM364 470l106 -106q7 -8 7 -18t-7 -18l-199 -199l94 -94q14 -14 10 -24.5t-25 -10.5h-300q-21 0 -35.5 14.5t-14.5 35.5v300q0 21 10.5 25t24.5 -10l94 -94l199 199 q8 7 18 7t18 -7zM1071 271l94 94q14 14 24.5 10t10.5 -25v-300q0 -21 -14.5 -35.5t-35.5 -14.5h-300q-21 0 -25 10.5t10 24.5l94 94l-199 199q-7 8 -7 18t7 18l106 106q8 7 18 7t18 -7z" />
+<glyph unicode="&#xe141;" d="M596 1192q121 0 231.5 -47.5t190 -127t127 -190t47.5 -231.5t-47.5 -231.5t-127 -190.5t-190 -127t-231.5 -47t-231.5 47t-190.5 127t-127 190.5t-47 231.5t47 231.5t127 190t190.5 127t231.5 47.5zM596 1010q-112 0 -207.5 -55.5t-151 -151t-55.5 -207.5t55.5 -207.5 t151 -151t207.5 -55.5t207.5 55.5t151 151t55.5 207.5t-55.5 207.5t-151 151t-207.5 55.5zM454.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38.5 -16.5t-38.5 16.5t-16 39t16 38.5t38.5 16zM754.5 905q22.5 0 38.5 -16t16 -38.5t-16 -39t-38 -16.5q-14 0 -29 10l-55 -145 q17 -23 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5q0 32 20.5 56.5t51.5 29.5l122 126l1 1q-9 14 -9 28q0 23 16 39t38.5 16zM345.5 709q22.5 0 38.5 -16t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16zM854.5 709q22.5 0 38.5 -16 t16 -38.5t-16 -38.5t-38.5 -16t-38.5 16t-16 38.5t16 38.5t38.5 16z" />
+<glyph unicode="&#xe142;" d="M546 173l469 470q91 91 99 192q7 98 -52 175.5t-154 94.5q-22 4 -47 4q-34 0 -66.5 -10t-56.5 -23t-55.5 -38t-48 -41.5t-48.5 -47.5q-376 -375 -391 -390q-30 -27 -45 -41.5t-37.5 -41t-32 -46.5t-16 -47.5t-1.5 -56.5q9 -62 53.5 -95t99.5 -33q74 0 125 51l548 548 q36 36 20 75q-7 16 -21.5 26t-32.5 10q-26 0 -50 -23q-13 -12 -39 -38l-341 -338q-15 -15 -35.5 -15.5t-34.5 13.5t-14 34.5t14 34.5q327 333 361 367q35 35 67.5 51.5t78.5 16.5q14 0 29 -1q44 -8 74.5 -35.5t43.5 -68.5q14 -47 2 -96.5t-47 -84.5q-12 -11 -32 -32 t-79.5 -81t-114.5 -115t-124.5 -123.5t-123 -119.5t-96.5 -89t-57 -45q-56 -27 -120 -27q-70 0 -129 32t-93 89q-48 78 -35 173t81 163l511 511q71 72 111 96q91 55 198 55q80 0 152 -33q78 -36 129.5 -103t66.5 -154q17 -93 -11 -183.5t-94 -156.5l-482 -476 q-15 -15 -36 -16t-37 14t-17.5 34t14.5 35z" />
+<glyph unicode="&#xe143;" d="M649 949q48 68 109.5 104t121.5 38.5t118.5 -20t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-150 152.5t-126.5 127.5t-93.5 124.5t-33.5 117.5q0 64 28 123t73 100.5t104 64t119 20 t120.5 -38.5t104.5 -104zM896 972q-33 0 -64.5 -19t-56.5 -46t-47.5 -53.5t-43.5 -45.5t-37.5 -19t-36 19t-40 45.5t-43 53.5t-54 46t-65.5 19q-67 0 -122.5 -55.5t-55.5 -132.5q0 -23 13.5 -51t46 -65t57.5 -63t76 -75l22 -22q15 -14 44 -44t50.5 -51t46 -44t41 -35t23 -12 t23.5 12t42.5 36t46 44t52.5 52t44 43q4 4 12 13q43 41 63.5 62t52 55t46 55t26 46t11.5 44q0 79 -53 133.5t-120 54.5z" />
+<glyph unicode="&#xe144;" d="M776.5 1214q93.5 0 159.5 -66l141 -141q66 -66 66 -160q0 -42 -28 -95.5t-62 -87.5l-29 -29q-31 53 -77 99l-18 18l95 95l-247 248l-389 -389l212 -212l-105 -106l-19 18l-141 141q-66 66 -66 159t66 159l283 283q65 66 158.5 66zM600 706l105 105q10 -8 19 -17l141 -141 q66 -66 66 -159t-66 -159l-283 -283q-66 -66 -159 -66t-159 66l-141 141q-66 66 -66 159.5t66 159.5l55 55q29 -55 75 -102l18 -17l-95 -95l247 -248l389 389z" />
+<glyph unicode="&#xe145;" d="M603 1200q85 0 162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5v953q0 21 30 46.5t81 48t129 37.5t163 15zM300 1000v-700h600v700h-600zM600 254q-43 0 -73.5 -30.5t-30.5 -73.5t30.5 -73.5t73.5 -30.5t73.5 30.5 t30.5 73.5t-30.5 73.5t-73.5 30.5z" />
+<glyph unicode="&#xe146;" d="M902 1185l283 -282q15 -15 15 -36t-14.5 -35.5t-35.5 -14.5t-35 15l-36 35l-279 -267v-300l-212 210l-308 -307l-280 -203l203 280l307 308l-210 212h300l267 279l-35 36q-15 14 -15 35t14.5 35.5t35.5 14.5t35 -15z" />
+<glyph unicode="&#xe148;" d="M700 1248v-78q38 -5 72.5 -14.5t75.5 -31.5t71 -53.5t52 -84t24 -118.5h-159q-4 36 -10.5 59t-21 45t-40 35.5t-64.5 20.5v-307l64 -13q34 -7 64 -16.5t70 -32t67.5 -52.5t47.5 -80t20 -112q0 -139 -89 -224t-244 -97v-77h-100v79q-150 16 -237 103q-40 40 -52.5 93.5 t-15.5 139.5h139q5 -77 48.5 -126t117.5 -65v335l-27 8q-46 14 -79 26.5t-72 36t-63 52t-40 72.5t-16 98q0 70 25 126t67.5 92t94.5 57t110 27v77h100zM600 754v274q-29 -4 -50 -11t-42 -21.5t-31.5 -41.5t-10.5 -65q0 -29 7 -50.5t16.5 -34t28.5 -22.5t31.5 -14t37.5 -10 q9 -3 13 -4zM700 547v-310q22 2 42.5 6.5t45 15.5t41.5 27t29 42t12 59.5t-12.5 59.5t-38 44.5t-53 31t-66.5 24.5z" />
+<glyph unicode="&#xe149;" d="M561 1197q84 0 160.5 -40t123.5 -109.5t47 -147.5h-153q0 40 -19.5 71.5t-49.5 48.5t-59.5 26t-55.5 9q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -26 13.5 -63t26.5 -61t37 -66q6 -9 9 -14h241v-100h-197q8 -50 -2.5 -115t-31.5 -95q-45 -62 -99 -112 q34 10 83 17.5t71 7.5q32 1 102 -16t104 -17q83 0 136 30l50 -147q-31 -19 -58 -30.5t-55 -15.5t-42 -4.5t-46 -0.5q-23 0 -76 17t-111 32.5t-96 11.5q-39 -3 -82 -16t-67 -25l-23 -11l-55 145q4 3 16 11t15.5 10.5t13 9t15.5 12t14.5 14t17.5 18.5q48 55 54 126.5 t-30 142.5h-221v100h166q-23 47 -44 104q-7 20 -12 41.5t-6 55.5t6 66.5t29.5 70.5t58.5 71q97 88 263 88z" />
+<glyph unicode="&#xe150;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM935 1184l230 -249q14 -14 10 -24.5t-25 -10.5h-150v-900h-200v900h-150q-21 0 -25 10.5t10 24.5l230 249q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe151;" d="M1000 700h-100v100h-100v-100h-100v500h300v-500zM400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM801 1100v-200h100v200h-100zM1000 350l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150z " />
+<glyph unicode="&#xe152;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 1050l-200 -250h200v-100h-300v150l200 250h-200v100h300v-150zM1000 0h-100v100h-100v-100h-100v500h300v-500zM801 400v-200h100v200h-100z " />
+<glyph unicode="&#xe153;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1000 700h-100v400h-100v100h200v-500zM1100 0h-100v100h-200v400h300v-500zM901 400v-200h100v200h-100z" />
+<glyph unicode="&#xe154;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1100 700h-100v100h-200v400h300v-500zM901 1100v-200h100v200h-100zM1000 0h-100v400h-100v100h200v-500z" />
+<glyph unicode="&#xe155;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
+<glyph unicode="&#xe156;" d="M400 300h150q21 0 25 -11t-10 -25l-230 -250q-14 -15 -35 -15t-35 15l-230 250q-14 14 -10 25t25 11h150v900h200v-900zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
+<glyph unicode="&#xe157;" d="M350 1100h400q162 0 256 -93.5t94 -256.5v-400q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5z" />
+<glyph unicode="&#xe158;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-163 0 -256.5 92.5t-93.5 257.5v400q0 163 94 256.5t256 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM440 770l253 -190q17 -12 17 -30t-17 -30l-253 -190q-16 -12 -28 -6.5t-12 26.5v400q0 21 12 26.5t28 -6.5z" />
+<glyph unicode="&#xe159;" d="M350 1100h400q163 0 256.5 -94t93.5 -256v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 163 92.5 256.5t257.5 93.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM350 700h400q21 0 26.5 -12t-6.5 -28l-190 -253q-12 -17 -30 -17t-30 17l-190 253q-12 16 -6.5 28t26.5 12z" />
+<glyph unicode="&#xe160;" d="M350 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -163 -92.5 -256.5t-257.5 -93.5h-400q-163 0 -256.5 94t-93.5 256v400q0 165 92.5 257.5t257.5 92.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5 v500q0 41 -29.5 70.5t-70.5 29.5zM580 693l190 -253q12 -16 6.5 -28t-26.5 -12h-400q-21 0 -26.5 12t6.5 28l190 253q12 17 30 17t30 -17z" />
+<glyph unicode="&#xe161;" d="M550 1100h400q165 0 257.5 -92.5t92.5 -257.5v-400q0 -165 -92.5 -257.5t-257.5 -92.5h-400q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h450q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-450q-21 0 -35.5 14.5t-14.5 35.5v100 q0 21 14.5 35.5t35.5 14.5zM338 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
+<glyph unicode="&#xe162;" d="M793 1182l9 -9q8 -10 5 -27q-3 -11 -79 -225.5t-78 -221.5l300 1q24 0 32.5 -17.5t-5.5 -35.5q-1 0 -133.5 -155t-267 -312.5t-138.5 -162.5q-12 -15 -26 -15h-9l-9 8q-9 11 -4 32q2 9 42 123.5t79 224.5l39 110h-302q-23 0 -31 19q-10 21 6 41q75 86 209.5 237.5 t228 257t98.5 111.5q9 16 25 16h9z" />
+<glyph unicode="&#xe163;" d="M350 1100h400q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-450q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h450q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400 q0 165 92.5 257.5t257.5 92.5zM938 867l324 -284q16 -14 16 -33t-16 -33l-324 -284q-16 -14 -27 -9t-11 26v150h-250q-21 0 -35.5 14.5t-14.5 35.5v200q0 21 14.5 35.5t35.5 14.5h250v150q0 21 11 26t27 -9z" />
+<glyph unicode="&#xe164;" d="M750 1200h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -10.5 -25t-24.5 10l-109 109l-312 -312q-15 -15 -35.5 -15t-35.5 15l-141 141q-15 15 -15 35.5t15 35.5l312 312l-109 109q-14 14 -10 24.5t25 10.5zM456 900h-156q-41 0 -70.5 -29.5t-29.5 -70.5v-500 q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v148l200 200v-298q0 -165 -93.5 -257.5t-256.5 -92.5h-400q-165 0 -257.5 92.5t-92.5 257.5v400q0 165 92.5 257.5t257.5 92.5h300z" />
+<glyph unicode="&#xe165;" d="M600 1186q119 0 227.5 -46.5t187 -125t125 -187t46.5 -227.5t-46.5 -227.5t-125 -187t-187 -125t-227.5 -46.5t-227.5 46.5t-187 125t-125 187t-46.5 227.5t46.5 227.5t125 187t187 125t227.5 46.5zM600 1022q-115 0 -212 -56.5t-153.5 -153.5t-56.5 -212t56.5 -212 t153.5 -153.5t212 -56.5t212 56.5t153.5 153.5t56.5 212t-56.5 212t-153.5 153.5t-212 56.5zM600 794q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
+<glyph unicode="&#xe166;" d="M450 1200h200q21 0 35.5 -14.5t14.5 -35.5v-350h245q20 0 25 -11t-9 -26l-383 -426q-14 -15 -33.5 -15t-32.5 15l-379 426q-13 15 -8.5 26t25.5 11h250v350q0 21 14.5 35.5t35.5 14.5zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe167;" d="M583 1182l378 -435q14 -15 9 -31t-26 -16h-244v-250q0 -20 -17 -35t-39 -15h-200q-20 0 -32 14.5t-12 35.5v250h-250q-20 0 -25.5 16.5t8.5 31.5l383 431q14 16 33.5 17t33.5 -14zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5z M900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe168;" d="M396 723l369 369q7 7 17.5 7t17.5 -7l139 -139q7 -8 7 -18.5t-7 -17.5l-525 -525q-7 -8 -17.5 -8t-17.5 8l-292 291q-7 8 -7 18t7 18l139 139q8 7 18.5 7t17.5 -7zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50 h-100z" />
+<glyph unicode="&#xe169;" d="M135 1023l142 142q14 14 35 14t35 -14l77 -77l-212 -212l-77 76q-14 15 -14 36t14 35zM655 855l210 210q14 14 24.5 10t10.5 -25l-2 -599q-1 -20 -15.5 -35t-35.5 -15l-597 -1q-21 0 -25 10.5t10 24.5l208 208l-154 155l212 212zM50 300h1000q21 0 35.5 -14.5t14.5 -35.5 v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe170;" d="M350 1200l599 -2q20 -1 35 -15.5t15 -35.5l1 -597q0 -21 -10.5 -25t-24.5 10l-208 208l-155 -154l-212 212l155 154l-210 210q-14 14 -10 24.5t25 10.5zM524 512l-76 -77q-15 -14 -36 -14t-35 14l-142 142q-14 14 -14 35t14 35l77 77zM50 300h1000q21 0 35.5 -14.5 t14.5 -35.5v-250h-1100v250q0 21 14.5 35.5t35.5 14.5zM900 200v-50h100v50h-100z" />
+<glyph unicode="&#xe171;" d="M1200 103l-483 276l-314 -399v423h-399l1196 796v-1096zM483 424v-230l683 953z" />
+<glyph unicode="&#xe172;" d="M1100 1000v-850q0 -21 -14.5 -35.5t-35.5 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200z" />
+<glyph unicode="&#xe173;" d="M1100 1000l-2 -149l-299 -299l-95 95q-9 9 -21.5 9t-21.5 -9l-149 -147h-312v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1132 638l106 -106q7 -7 7 -17.5t-7 -17.5l-420 -421q-8 -7 -18 -7 t-18 7l-202 203q-8 7 -8 17.5t8 17.5l106 106q7 8 17.5 8t17.5 -8l79 -79l297 297q7 7 17.5 7t17.5 -7z" />
+<glyph unicode="&#xe174;" d="M1100 1000v-269l-103 -103l-134 134q-15 15 -33.5 16.5t-34.5 -12.5l-266 -266h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM1202 572l70 -70q15 -15 15 -35.5t-15 -35.5l-131 -131 l131 -131q15 -15 15 -35.5t-15 -35.5l-70 -70q-15 -15 -35.5 -15t-35.5 15l-131 131l-131 -131q-15 -15 -35.5 -15t-35.5 15l-70 70q-15 15 -15 35.5t15 35.5l131 131l-131 131q-15 15 -15 35.5t15 35.5l70 70q15 15 35.5 15t35.5 -15l131 -131l131 131q15 15 35.5 15 t35.5 -15z" />
+<glyph unicode="&#xe175;" d="M1100 1000v-300h-350q-21 0 -35.5 -14.5t-14.5 -35.5v-150h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM850 600h100q21 0 35.5 -14.5t14.5 -35.5v-250h150q21 0 25 -10.5t-10 -24.5 l-230 -230q-14 -14 -35 -14t-35 14l-230 230q-14 14 -10 24.5t25 10.5h150v250q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe176;" d="M1100 1000v-400l-165 165q-14 15 -35 15t-35 -15l-263 -265h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1000h-100v200h100v-200zM935 565l230 -229q14 -15 10 -25.5t-25 -10.5h-150v-250q0 -20 -14.5 -35 t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35v250h-150q-21 0 -25 10.5t10 25.5l230 229q14 15 35 15t35 -15z" />
+<glyph unicode="&#xe177;" d="M50 1100h1100q21 0 35.5 -14.5t14.5 -35.5v-150h-1200v150q0 21 14.5 35.5t35.5 14.5zM1200 800v-550q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v550h1200zM100 500v-200h400v200h-400z" />
+<glyph unicode="&#xe178;" d="M935 1165l248 -230q14 -14 14 -35t-14 -35l-248 -230q-14 -14 -24.5 -10t-10.5 25v150h-400v200h400v150q0 21 10.5 25t24.5 -10zM200 800h-50q-21 0 -35.5 14.5t-14.5 35.5v100q0 21 14.5 35.5t35.5 14.5h50v-200zM400 800h-100v200h100v-200zM18 435l247 230 q14 14 24.5 10t10.5 -25v-150h400v-200h-400v-150q0 -21 -10.5 -25t-24.5 10l-247 230q-15 14 -15 35t15 35zM900 300h-100v200h100v-200zM1000 500h51q20 0 34.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-34.5 -14.5h-51v200z" />
+<glyph unicode="&#xe179;" d="M862 1073l276 116q25 18 43.5 8t18.5 -41v-1106q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v397q-4 1 -11 5t-24 17.5t-30 29t-24 42t-11 56.5v359q0 31 18.5 65t43.5 52zM550 1200q22 0 34.5 -12.5t14.5 -24.5l1 -13v-450q0 -28 -10.5 -59.5 t-25 -56t-29 -45t-25.5 -31.5l-10 -11v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447q-4 4 -11 11.5t-24 30.5t-30 46t-24 55t-11 60v450q0 2 0.5 5.5t4 12t8.5 15t14.5 12t22.5 5.5q20 0 32.5 -12.5t14.5 -24.5l3 -13v-350h100v350v5.5t2.5 12 t7 15t15 12t25.5 5.5q23 0 35.5 -12.5t13.5 -24.5l1 -13v-350h100v350q0 2 0.5 5.5t3 12t7 15t15 12t24.5 5.5z" />
+<glyph unicode="&#xe180;" d="M1200 1100v-56q-4 0 -11 -0.5t-24 -3t-30 -7.5t-24 -15t-11 -24v-888q0 -22 25 -34.5t50 -13.5l25 -2v-56h-400v56q75 0 87.5 6.5t12.5 43.5v394h-500v-394q0 -37 12.5 -43.5t87.5 -6.5v-56h-400v56q4 0 11 0.5t24 3t30 7.5t24 15t11 24v888q0 22 -25 34.5t-50 13.5 l-25 2v56h400v-56q-75 0 -87.5 -6.5t-12.5 -43.5v-394h500v394q0 37 -12.5 43.5t-87.5 6.5v56h400z" />
+<glyph unicode="&#xe181;" d="M675 1000h375q21 0 35.5 -14.5t14.5 -35.5v-150h-105l-295 -98v98l-200 200h-400l100 100h375zM100 900h300q41 0 70.5 -29.5t29.5 -70.5v-500q0 -41 -29.5 -70.5t-70.5 -29.5h-300q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5zM100 800v-200h300v200 h-300zM1100 535l-400 -133v163l400 133v-163zM100 500v-200h300v200h-300zM1100 398v-248q0 -21 -14.5 -35.5t-35.5 -14.5h-375l-100 -100h-375l-100 100h400l200 200h105z" />
+<glyph unicode="&#xe182;" d="M17 1007l162 162q17 17 40 14t37 -22l139 -194q14 -20 11 -44.5t-20 -41.5l-119 -118q102 -142 228 -268t267 -227l119 118q17 17 42.5 19t44.5 -12l192 -136q19 -14 22.5 -37.5t-13.5 -40.5l-163 -162q-3 -1 -9.5 -1t-29.5 2t-47.5 6t-62.5 14.5t-77.5 26.5t-90 42.5 t-101.5 60t-111 83t-119 108.5q-74 74 -133.5 150.5t-94.5 138.5t-60 119.5t-34.5 100t-15 74.5t-4.5 48z" />
+<glyph unicode="&#xe183;" d="M600 1100q92 0 175 -10.5t141.5 -27t108.5 -36.5t81.5 -40t53.5 -37t31 -27l9 -10v-200q0 -21 -14.5 -33t-34.5 -9l-202 34q-20 3 -34.5 20t-14.5 38v146q-141 24 -300 24t-300 -24v-146q0 -21 -14.5 -38t-34.5 -20l-202 -34q-20 -3 -34.5 9t-14.5 33v200q3 4 9.5 10.5 t31 26t54 37.5t80.5 39.5t109 37.5t141 26.5t175 10.5zM600 795q56 0 97 -9.5t60 -23.5t30 -28t12 -24l1 -10v-50l365 -303q14 -15 24.5 -40t10.5 -45v-212q0 -21 -14.5 -35.5t-35.5 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45t24.5 40l365 303v50 q0 4 1 10.5t12 23t30 29t60 22.5t97 10z" />
+<glyph unicode="&#xe184;" d="M1100 700l-200 -200h-600l-200 200v500h200v-200h200v200h200v-200h200v200h200v-500zM250 400h700q21 0 35.5 -14.5t14.5 -35.5t-14.5 -35.5t-35.5 -14.5h-12l137 -100h-950l137 100h-12q-21 0 -35.5 14.5t-14.5 35.5t14.5 35.5t35.5 14.5zM50 100h1100q21 0 35.5 -14.5 t14.5 -35.5v-50h-1200v50q0 21 14.5 35.5t35.5 14.5z" />
+<glyph unicode="&#xe185;" d="M700 1100h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-1000h300v1000q0 41 -29.5 70.5t-70.5 29.5zM1100 800h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-700h300v700q0 41 -29.5 70.5t-70.5 29.5zM400 0h-300v400q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-400z " />
+<glyph unicode="&#xe186;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
+<glyph unicode="&#xe187;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 300h-100v200h-100v-200h-100v500h100v-200h100v200h100v-500zM900 700v-300l-100 -100h-200v500h200z M700 700v-300h100v300h-100z" />
+<glyph unicode="&#xe188;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-300h200v-100h-300v500h300v-100zM900 700h-200v-300h200v-100h-300v500h300v-100z" />
+<glyph unicode="&#xe189;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 400l-300 150l300 150v-300zM900 550l-300 -150v300z" />
+<glyph unicode="&#xe190;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM900 300h-700v500h700v-500zM800 700h-130q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300zM300 700v-300 h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130z" />
+<glyph unicode="&#xe191;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 700h-200v-100h200v-300h-300v100h200v100h-200v300h300v-100zM900 300h-100v400h-100v100h200v-500z M700 300h-100v100h100v-100z" />
+<glyph unicode="&#xe192;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM300 700h200v-400h-300v500h100v-100zM900 300h-100v400h-100v100h200v-500zM300 600v-200h100v200h-100z M700 300h-100v100h100v-100z" />
+<glyph unicode="&#xe193;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM100 900v-700h900v700h-900zM500 500l-199 -200h-100v50l199 200v150h-200v100h300v-300zM900 300h-100v400h-100v100h200v-500zM701 300h-100 v100h100v-100z" />
+<glyph unicode="&#xe194;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700h-300v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
+<glyph unicode="&#xe195;" d="M600 1191q120 0 229.5 -47t188.5 -126t126 -188.5t47 -229.5t-47 -229.5t-126 -188.5t-188.5 -126t-229.5 -47t-229.5 47t-188.5 126t-126 188.5t-47 229.5t47 229.5t126 188.5t188.5 126t229.5 47zM600 1021q-114 0 -211 -56.5t-153.5 -153.5t-56.5 -211t56.5 -211 t153.5 -153.5t211 -56.5t211 56.5t153.5 153.5t56.5 211t-56.5 211t-153.5 153.5t-211 56.5zM800 700v-100l-50 -50l100 -100v-50h-100l-100 100h-150v-100h-100v400h300zM500 700v-100h200v100h-200z" />
+<glyph unicode="&#xe197;" d="M503 1089q110 0 200.5 -59.5t134.5 -156.5q44 14 90 14q120 0 205 -86.5t85 -207t-85 -207t-205 -86.5h-128v250q0 21 -14.5 35.5t-35.5 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-250h-222q-80 0 -136 57.5t-56 136.5q0 69 43 122.5t108 67.5q-2 19 -2 37q0 100 49 185 t134 134t185 49zM525 500h150q10 0 17.5 -7.5t7.5 -17.5v-275h137q21 0 26 -11.5t-8 -27.5l-223 -244q-13 -16 -32 -16t-32 16l-223 244q-13 16 -8 27.5t26 11.5h137v275q0 10 7.5 17.5t17.5 7.5z" />
+<glyph unicode="&#xe198;" d="M502 1089q110 0 201 -59.5t135 -156.5q43 15 89 15q121 0 206 -86.5t86 -206.5q0 -99 -60 -181t-150 -110l-378 360q-13 16 -31.5 16t-31.5 -16l-381 -365h-9q-79 0 -135.5 57.5t-56.5 136.5q0 69 43 122.5t108 67.5q-2 19 -2 38q0 100 49 184.5t133.5 134t184.5 49.5z M632 467l223 -228q13 -16 8 -27.5t-26 -11.5h-137v-275q0 -10 -7.5 -17.5t-17.5 -7.5h-150q-10 0 -17.5 7.5t-7.5 17.5v275h-137q-21 0 -26 11.5t8 27.5q199 204 223 228q19 19 31.5 19t32.5 -19z" />
+<glyph unicode="&#xe199;" d="M700 100v100h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-100h-50q-21 0 -35.5 -14.5t-14.5 -35.5v-50h400v50q0 21 -14.5 35.5t-35.5 14.5h-50z" />
+<glyph unicode="&#xe200;" d="M600 1179q94 0 167.5 -56.5t99.5 -145.5q89 -6 150.5 -71.5t61.5 -155.5q0 -61 -29.5 -112.5t-79.5 -82.5q9 -29 9 -55q0 -74 -52.5 -126.5t-126.5 -52.5q-55 0 -100 30v-251q21 0 35.5 -14.5t14.5 -35.5v-50h-300v50q0 21 14.5 35.5t35.5 14.5v251q-45 -30 -100 -30 q-74 0 -126.5 52.5t-52.5 126.5q0 18 4 38q-47 21 -75.5 65t-28.5 97q0 74 52.5 126.5t126.5 52.5q5 0 23 -2q0 2 -1 10t-1 13q0 116 81.5 197.5t197.5 81.5z" />
+<glyph unicode="&#xe201;" d="M1010 1010q111 -111 150.5 -260.5t0 -299t-150.5 -260.5q-83 -83 -191.5 -126.5t-218.5 -43.5t-218.5 43.5t-191.5 126.5q-111 111 -150.5 260.5t0 299t150.5 260.5q83 83 191.5 126.5t218.5 43.5t218.5 -43.5t191.5 -126.5zM476 1065q-4 0 -8 -1q-121 -34 -209.5 -122.5 t-122.5 -209.5q-4 -12 2.5 -23t18.5 -14l36 -9q3 -1 7 -1q23 0 29 22q27 96 98 166q70 71 166 98q11 3 17.5 13.5t3.5 22.5l-9 35q-3 13 -14 19q-7 4 -15 4zM512 920q-4 0 -9 -2q-80 -24 -138.5 -82.5t-82.5 -138.5q-4 -13 2 -24t19 -14l34 -9q4 -1 8 -1q22 0 28 21 q18 58 58.5 98.5t97.5 58.5q12 3 18 13.5t3 21.5l-9 35q-3 12 -14 19q-7 4 -15 4zM719.5 719.5q-49.5 49.5 -119.5 49.5t-119.5 -49.5t-49.5 -119.5t49.5 -119.5t119.5 -49.5t119.5 49.5t49.5 119.5t-49.5 119.5zM855 551q-22 0 -28 -21q-18 -58 -58.5 -98.5t-98.5 -57.5 q-11 -4 -17 -14.5t-3 -21.5l9 -35q3 -12 14 -19q7 -4 15 -4q4 0 9 2q80 24 138.5 82.5t82.5 138.5q4 13 -2.5 24t-18.5 14l-34 9q-4 1 -8 1zM1000 515q-23 0 -29 -22q-27 -96 -98 -166q-70 -71 -166 -98q-11 -3 -17.5 -13.5t-3.5 -22.
 5l9 -35q3 -13 14 -19q7 -4 15 -4 q4 0 8 1q121 34 209.5 122.5t122.5 209.5q4 12 -2.5 23t-18.5 14l-36 9q-3 1 -7 1z" />
+<glyph unicode="&#xe202;" d="M700 800h300v-380h-180v200h-340v-200h-380v755q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM700 300h162l-212 -212l-212 212h162v200h100v-200zM520 0h-395q-10 0 -17.5 7.5t-7.5 17.5v395zM1000 220v-195q0 -10 -7.5 -17.5t-17.5 -7.5h-195z" />
+<glyph unicode="&#xe203;" d="M700 800h300v-520l-350 350l-550 -550v1095q0 10 7.5 17.5t17.5 7.5h575v-400zM1000 900h-200v200zM862 200h-162v-200h-100v200h-162l212 212zM480 0h-355q-10 0 -17.5 7.5t-7.5 17.5v55h380v-80zM1000 80v-55q0 -10 -7.5 -17.5t-17.5 -7.5h-155v80h180z" />
+<glyph unicode="&#xe204;" d="M1162 800h-162v-200h100l100 -100h-300v300h-162l212 212zM200 800h200q27 0 40 -2t29.5 -10.5t23.5 -30t7 -57.5h300v-100h-600l-200 -350v450h100q0 36 7 57.5t23.5 30t29.5 10.5t40 2zM800 400h240l-240 -400h-800l300 500h500v-100z" />
+<glyph unicode="&#xe205;" d="M650 1100h100q21 0 35.5

<TRUNCATED>

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] [abbrv] hadoop git commit: YARN-8179: Preemption does not happen due to natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.

Posted by ar...@apache.org.
YARN-8179: Preemption does not happen due to natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b4c44bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b4c44bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b4c44bd

Branch: refs/heads/HDDS-48
Commit: 0b4c44bdeef62945b592d5761666ad026b629c0b
Parents: 132a547
Author: Eric E Payne <er...@oath.com>
Authored: Mon May 21 20:14:58 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Mon May 21 20:14:58 2018 +0000

----------------------------------------------------------------------
 .../capacity/PreemptableResourceCalculator.java |  7 ++-
 ...pacityPreemptionPolicyInterQueueWithDRF.java | 56 ++++++++++++++++++++
 2 files changed, 61 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b4c44bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 2d2cdf6..676c14f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -197,8 +197,11 @@ public class PreemptableResourceCalculator
            */
           Resource resToObtain = qT.toBePreempted;
           if (!isReservedPreemptionCandidatesSelector) {
-            resToObtain = Resources.multiply(qT.toBePreempted,
-                context.getNaturalTerminationFactor());
+            if (Resources.greaterThan(rc, clusterResource, resToObtain,
+                Resource.newInstance(0, 0))) {
+              resToObtain = Resources.multiplyAndNormalizeUp(rc, qT.toBePreempted,
+                  context.getNaturalTerminationFactor(), Resource.newInstance(1, 1));
+            }
           }
 
           // Only add resToObtain when it >= 0
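 
The fix above addresses the rounding behavior: scaling a small to-be-preempted amount (say 1 VCore) by a natural_termination_factor below 1.0 used to truncate to zero, so under DRF no preemption was ever requested. multiplyAndNormalizeUp instead rounds the scaled value up to a step of (1 MB, 1 VCore) whenever there is anything to preempt. A minimal standalone sketch of the arithmetic, using plain ints rather than the actual Resources API:

```
public class NaturalTerminationFactorSketch {
  public static void main(String[] args) {
    int toBePreemptedVcores = 1;            // queue B only asks for 1 vcore
    double naturalTerminationFactor = 0.2;

    // Old behavior: the scaled value is truncated toward zero,
    // so 1 * 0.2 becomes 0 and preemption never fires.
    int truncated = (int) (toBePreemptedVcores * naturalTerminationFactor);

    // New behavior: round up to the normalization step (1 vcore),
    // so at least 1 vcore is marked for preemption.
    int roundedUp = (int) Math.ceil(toBePreemptedVcores * naturalTerminationFactor);

    System.out.println("old (truncate): " + truncated);  // prints 0
    System.out.println("new (round up): " + roundedUp);  // prints 1
  }
}
```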

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b4c44bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
index 0d6d350..c8a1f0f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
@@ -18,15 +18,28 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
     extends ProportionalCapacityPreemptionPolicyMockFramework {
+
+  @Before
+  public void setup() {
+    super.setup();
+    rc = new DominantResourceCalculator();
+    when(cs.getResourceCalculator()).thenReturn(rc);
+    policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
+  }
+
   @Test
   public void testInterQueuePreemptionWithMultipleResource()
       throws Exception {
@@ -65,4 +78,47 @@ public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(2))));
   }
+
+  @Test
+  public void testInterQueuePreemptionWithNaturalTerminationFactor()
+      throws Exception {
+    /**
+     * Queue structure is:
+     *
+     * <pre>
+     *       root
+     *      /   \
+     *     a     b
+     * </pre>
+     *
+     * Guaranteed resources of a/b are 50:50. Total cluster resource = 100.
+     * Scenario: All resources are allocated to Queue A.
+     * Even though Queue B needs only a few resources, like 1 VCore, some
+     * resources must be preempted from the app which is running in Queue A.
+     */
+
+    conf.setFloat(
+        CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
+        (float) 0.2);
+
+    String labelsConfig = "=100:50,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100:50";
+    String queuesConfig =
+        // guaranteed,max,used,pending
+        "root(=[100:50 100:50 50:50 0:0]);" + // root
+            "-a(=[50:25 100:50 50:50 0:0]);" + // a
+            "-b(=[50:25 50:25 0:0 2:1]);"; // b
+
+    String appsConfig =
+        //queueName\t(priority,resource,host,expression,#repeat,reserved)
+        "a\t(1,2:1,n1,,50,false);"; // app1 in a
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(1)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[39/50] [abbrv] hadoop git commit: HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full situation. Contributed by Arpit Agarwal.

Posted by ar...@apache.org.
HADOOP-15450. Avoid fsync storm triggered by DiskChecker and handle disk full situation. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcc8e76b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcc8e76b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcc8e76b

Branch: refs/heads/HDDS-48
Commit: bcc8e76badc1341a6cf995c8e44fa5e422158de8
Parents: 5f11288
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue May 22 11:19:15 2018 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue May 22 11:20:51 2018 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/util/DiskChecker.java     |  46 ++++-
 .../org/apache/hadoop/util/TestDiskChecker.java | 102 -----------
 .../hadoop/util/TestDiskCheckerWithDiskIo.java  | 173 +++++++++++++++++++
 3 files changed, 217 insertions(+), 104 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc8e76b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index a4fa8fd..595aeed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -74,12 +74,30 @@ public class DiskChecker {
    * @throws DiskErrorException
    */
   public static void checkDir(File dir) throws DiskErrorException {
+    checkDirInternal(dir);
+  }
+
+  /**
+   * Create the directory if it doesn't exist and check that dir is
+   * readable, writable and executable. Perform some disk IO to
+   * ensure that the disk is usable for writes.
+   *
+   * @param dir
+   * @throws DiskErrorException
+   */
+  public static void checkDirWithDiskIo(File dir)
+      throws DiskErrorException {
+    checkDirInternal(dir);
+    doDiskIo(dir);
+  }
+
+  private static void checkDirInternal(File dir)
+      throws DiskErrorException {    
     if (!mkdirsWithExistsCheck(dir)) {
       throw new DiskErrorException("Cannot create directory: "
                                    + dir.toString());
     }
     checkAccessByFileMethods(dir);
-    doDiskIo(dir);
   }
 
   /**
@@ -94,10 +112,34 @@ public class DiskChecker {
    */
   public static void checkDir(LocalFileSystem localFS, Path dir,
                               FsPermission expected)
+      throws DiskErrorException, IOException {
+    checkDirInternal(localFS, dir, expected);
+  }
+
+
+  /**
+   * Create the local directory if necessary, also ensure permissions
+   * allow it to be read from and written into. Perform some disk IO
+   * to ensure that the disk is usable for writes.
+   *
+   * @param localFS local filesystem
+   * @param dir directory
+   * @param expected permission
+   * @throws DiskErrorException
+   * @throws IOException
+   */  
+  public static void checkDirWithDiskIo(LocalFileSystem localFS, Path dir,
+                                        FsPermission expected) 
+      throws DiskErrorException, IOException {
+    checkDirInternal(localFS, dir, expected);
+    doDiskIo(localFS.pathToFile(dir));
+  }  
+
+  private static void checkDirInternal(LocalFileSystem localFS, Path dir,
+                                       FsPermission expected)
   throws DiskErrorException, IOException {
     mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
     checkAccessByFileMethods(localFS.pathToFile(dir));
-    doDiskIo(localFS.pathToFile(dir));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc8e76b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index bd8e1dd..6b6c6c8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.util;
 
 import java.io.*;
 import java.nio.file.Files;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.util.DiskChecker.FileIoProvider;
 import org.junit.After;
@@ -214,105 +213,4 @@ public class TestDiskChecker {
     }
     localDir.delete();
   }
-
-  /**
-   * Verify DiskChecker ignores at least 2 transient file creation errors.
-   */
-  @Test(timeout = 30000)
-  public void testDiskIoIgnoresTransientCreateErrors() throws Throwable {
-    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
-        DiskChecker.DISK_IO_MAX_ITERATIONS - 1, 0));
-    checkDirs(true, "755", true);
-  }
-
-  /**
-   * Verify DiskChecker bails after 3 file creation errors.
-   */
-  @Test(timeout = 30000)
-  public void testDiskIoDetectsCreateErrors() throws Throwable {
-    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
-        DiskChecker.DISK_IO_MAX_ITERATIONS, 0));
-    checkDirs(true, "755", false);
-  }
-
-  /**
-   * Verify DiskChecker ignores at least 2 transient file write errors.
-   */
-  @Test(timeout = 30000)
-  public void testDiskIoIgnoresTransientWriteErrors() throws Throwable {
-    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
-        0, DiskChecker.DISK_IO_MAX_ITERATIONS - 1));
-    checkDirs(true, "755", true);
-  }
-
-  /**
-   * Verify DiskChecker bails after 3 file write errors.
-   */
-  @Test(timeout = 30000)
-  public void testDiskIoDetectsWriteErrors() throws Throwable {
-    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
-        0, DiskChecker.DISK_IO_MAX_ITERATIONS));
-    checkDirs(true, "755", false);
-  }
-
-  /**
-   * Verify DiskChecker's test file naming scheme.
-   */
-  @Test(timeout = 30000)
-  public void testDiskIoFileNaming() throws Throwable {
-    final File rootDir = new File("/");
-    assertTrue(".001".matches("\\.00\\d$"));
-    for (int i = 1; i < DiskChecker.DISK_IO_MAX_ITERATIONS; ++i) {
-      final File file = DiskChecker.getFileNameForDiskIoCheck(rootDir, i);
-      assertTrue(
-          "File name does not match expected pattern: " + file,
-          file.toString().matches("^.*\\.[0-9]+$"));
-    }
-    final File guidFile = DiskChecker.getFileNameForDiskIoCheck(
-        rootDir, DiskChecker.DISK_IO_MAX_ITERATIONS);
-    assertTrue(
-        "File name does not match expected pattern: " + guidFile,
-        guidFile.toString().matches("^.*\\.[A-Za-z0-9-]+$"));
-  }
-
-  /**
-   * A dummy {@link DiskChecker#FileIoProvider} that can throw a programmable
-   * number of times.
-   */
-  private static class TestFileIoProvider implements FileIoProvider {
-    private final AtomicInteger numCreateCalls = new AtomicInteger(0);
-    private final AtomicInteger numWriteCalls = new AtomicInteger(0);
-
-    private final int numTimesToThrowOnCreate;
-    private final int numTimesToThrowOnWrite;
-
-    public TestFileIoProvider(
-        int numTimesToThrowOnCreate, int numTimesToThrowOnWrite) {
-      this.numTimesToThrowOnCreate = numTimesToThrowOnCreate;
-      this.numTimesToThrowOnWrite = numTimesToThrowOnWrite;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public FileOutputStream get(File f) throws FileNotFoundException {
-      if (numCreateCalls.getAndIncrement() < numTimesToThrowOnCreate) {
-        throw new FileNotFoundException("Dummy exception for testing");
-      }
-      // Can't mock final class FileOutputStream.
-      return new FileOutputStream(f);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void write(FileOutputStream fos, byte[] data) throws IOException {
-      if (numWriteCalls.getAndIncrement() < numTimesToThrowOnWrite) {
-        throw new IOException("Dummy exception for testing");
-      }
-      fos.write(data);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc8e76b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java
new file mode 100644
index 0000000..9446273
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.DiskChecker.FileIoProvider;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertTrue;
+
+
+/**
+ * Verify {@link DiskChecker} validation routines that perform
+ * Disk IO.
+ */
+public final class TestDiskCheckerWithDiskIo {
+  @Rule
+  public Timeout testTimeout = new Timeout(30_000);
+  
+  /**
+   * Verify DiskChecker ignores at least 2 transient file creation errors.
+   */
+  @Test
+  public final void testDiskIoIgnoresTransientCreateErrors() throws Throwable {
+    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
+        DiskChecker.DISK_IO_MAX_ITERATIONS - 1, 0));
+    checkDirs(true);
+  }
+
+  /**
+   * Verify DiskChecker bails after 3 file creation errors.
+   */
+  @Test(expected = DiskErrorException.class)
+  public final void testDiskIoDetectsCreateErrors() throws Throwable {
+    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
+        DiskChecker.DISK_IO_MAX_ITERATIONS, 0));
+    checkDirs(false);
+  }
+
+  /**
+   * Verify DiskChecker ignores at least 2 transient file write errors.
+   */
+  @Test
+  public final void testDiskIoIgnoresTransientWriteErrors() throws Throwable {
+    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
+        0, DiskChecker.DISK_IO_MAX_ITERATIONS - 1));
+    checkDirs(true);
+  }
+
+  /**
+   * Verify DiskChecker bails after 3 file write errors.
+   */
+  @Test(expected = DiskErrorException.class)
+  public final void testDiskIoDetectsWriteErrors() throws Throwable {
+    DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
+        0, DiskChecker.DISK_IO_MAX_ITERATIONS));
+    checkDirs(false);
+  }
+
+  /**
+   * Verify DiskChecker's test file naming scheme.
+   */
+  @Test
+  public void testDiskIoFileNaming() {
+    final File rootDir = new File("/");
+    assertTrue(".001".matches("\\.00\\d$"));
+    for (int i = 1; i < DiskChecker.DISK_IO_MAX_ITERATIONS; ++i) {
+      final File file = DiskChecker.getFileNameForDiskIoCheck(rootDir, i);
+      assertTrue(
+          "File name does not match expected pattern: " + file,
+          file.toString().matches("^.*\\.[0-9]+$"));
+    }
+    final File guidFile = DiskChecker.getFileNameForDiskIoCheck(
+        rootDir, DiskChecker.DISK_IO_MAX_ITERATIONS);
+    assertTrue(
+        "File name does not match expected pattern: " + guidFile,
+        guidFile.toString().matches("^.*\\.[A-Za-z0-9-]+$"));
+  }
+
+  /**
+   * A dummy {@link DiskChecker#FileIoProvider} that can throw a programmable
+   * number of times.
+   */
+  private static class TestFileIoProvider implements FileIoProvider {
+    private final AtomicInteger numCreateCalls = new AtomicInteger(0);
+    private final AtomicInteger numWriteCalls = new AtomicInteger(0);
+
+    private final int numTimesToThrowOnCreate;
+    private final int numTimesToThrowOnWrite;
+
+    public TestFileIoProvider(
+        int numTimesToThrowOnCreate, int numTimesToThrowOnWrite) {
+      this.numTimesToThrowOnCreate = numTimesToThrowOnCreate;
+      this.numTimesToThrowOnWrite = numTimesToThrowOnWrite;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public FileOutputStream get(File f) throws FileNotFoundException {
+      if (numCreateCalls.getAndIncrement() < numTimesToThrowOnCreate) {
+        throw new FileNotFoundException("Dummy exception for testing");
+      }
+      // Can't mock final class FileOutputStream.
+      return new FileOutputStream(f);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void write(FileOutputStream fos, byte[] data) throws IOException {
+      if (numWriteCalls.getAndIncrement() < numTimesToThrowOnWrite) {
+        throw new IOException("Dummy exception for testing");
+      }
+      fos.write(data);
+    }
+  }
+
+  private void checkDirs(boolean success)
+      throws Throwable {
+    File localDir = createTempDir();
+    try {
+      DiskChecker.checkDirWithDiskIo(localDir);
+    } finally {
+      localDir.delete();
+    }
+  }
+
+  /**
+   * Create an empty directory with a random name under test directory
+   * with Posix permissions "0755".
+   *
+   * @return the created directory
+   * @throws java.io.IOException if any
+   */
+  private File createTempDir() throws java.io.IOException {
+    final File testDir = new File(System.getProperty("test.build.data"));
+    return Files.createTempDirectory(testDir.toPath(), "test",
+        PosixFilePermissions.asFileAttribute(
+            PosixFilePermissions.fromString("rwxr-xr-x"))).toFile();
+  }  
+}
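 
With this split, checkDir() keeps the old, cheap permission checks while checkDirWithDiskIo() additionally exercises the disk with real file IO, so a full or failing disk is caught eagerly. A minimal usage sketch based on the signatures in this diff; the data directory path is hypothetical:

```
import java.io.File;

import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class DiskIoCheckSketch {
  public static void main(String[] args) {
    File dataDir = new File("/data/disk1");  // hypothetical data directory
    try {
      // Cheap checks only: create the directory if missing and
      // verify it is readable, writable and executable.
      DiskChecker.checkDir(dataDir);

      // Same checks plus actual disk IO (creating and writing a small
      // file), detecting an unusable disk before real writes start.
      DiskChecker.checkDirWithDiskIo(dataDir);

      System.out.println(dataDir + " is usable for writes");
    } catch (DiskErrorException e) {
      System.err.println("Disk check failed: " + e.getMessage());
    }
  }
}
```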


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/50] [abbrv] hadoop git commit: HDFS-13586. Fsync fails on directories on Windows. Contributed by Lukas Majercack.

Posted by ar...@apache.org.
HDFS-13586. Fsync fails on directories on Windows. Contributed by Lukas Majercack.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87836136
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87836136
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87836136

Branch: refs/heads/HDDS-48
Commit: 8783613696674aba4ae1739c6e8f48cda0d1c386
Parents: 2f2dd22
Author: Inigo Goiri <in...@apache.org>
Authored: Thu May 17 19:26:44 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu May 17 19:26:44 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/io/IOUtils.java           | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87836136/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index f451ff3..7288812 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -414,6 +414,13 @@ public class IOUtils {
           "File/Directory " + fileToSync.getAbsolutePath() + " does not exist");
     }
     boolean isDir = fileToSync.isDirectory();
+
+    // HDFS-13586, FileChannel.open fails with AccessDeniedException
+    // for any directory, ignore.
+    if (isDir && Shell.WINDOWS) {
+      return;
+    }
+
     // If the file is a directory we have to open read-only, for regular files
     // we must open r/w for the fsync to have an effect. See
     // http://blog.httrack.com/blog/2013/11/15/
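 
The Windows-specific early return above exists because opening a FileChannel on a directory throws java.nio.file.AccessDeniedException there, so a directory fsync can never succeed. A minimal repro sketch of the failing call; the directory path is hypothetical:

```
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class DirFsyncSketch {
  public static void main(String[] args) throws Exception {
    // On Windows this open() throws java.nio.file.AccessDeniedException
    // for any directory; on POSIX it succeeds, and force() flushes the
    // directory entry to stable storage.
    try (FileChannel channel = FileChannel.open(
        Paths.get("/data/dir"), StandardOpenOption.READ)) {
      channel.force(true);
    }
  }
}
```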


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js b/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js
new file mode 100644
index 0000000..3f96f00
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/js/ozonedoc.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+$(
+  function(){
+    $("table").addClass("table table-condensed table-bordered table-striped");
+  }
+);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/theme.toml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/theme.toml b/hadoop-ozone/docs/themes/ozonedoc/theme.toml
new file mode 100644
index 0000000..9f427fe
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/theme.toml
@@ -0,0 +1,2 @@
+
+name = "Ozonedoc"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
deleted file mode 100644
index fc63742..0000000
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
+++ /dev/null
@@ -1,150 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-Ozone Command Shell
-===================
-
-Ozone command shell gives a command shell interface to work against ozone.
-Please note that this document assumes that the cluster is deployed
-with simple authentication.
-
-The Ozone commands take the following format.
-
-* `ozone oz --command_ http://hostname:port/volume/bucket/key -user
-<name> -root`
-
-The *port* specified in the command should match the port mentioned in the config
-property `hdds.rest.http-address`. This property can be set in `ozone-site.xml`.
-The default value for the port is `9880` and is used in below commands.
-
-The *-root* option is a command-line shortcut that allows *ozone oz*
-commands to be run as the user that started the cluster. This is useful to
-indicate that you want the commands to be run as some admin user. The only
-reason for this option is that it makes the life of a lazy developer
-easier.
-
-Ozone Volume Commands
---------------------
-
-The volume commands allow users to create, delete and list the volumes in the
-ozone cluster.
-
-### Create Volume
-
-Volumes can be created only by Admins. Here is an example of creating a volume.
-
-* `ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota
-100TB -root`
-
-The above command creates a volume called `hive` owned by user `bilbo`. The
-`-root` option allows the command to be executed as user `hdfs` which is an
-admin in the cluster.
-
-### Update Volume
-
-Updates information like ownership and quota on an existing volume.
-
-* `ozone oz  -updateVolume  http://localhost:9880/hive -quota 500TB -root`
-
-The above command changes the volume quota of hive from 100TB to 500TB.
-
-### Delete Volume
-Deletes a Volume if it is empty.
-
-* `ozone oz -deleteVolume http://localhost:9880/hive -root`
-
-
-### Info Volume
-Info volume command allows the owner or the administrator of the cluster to read meta-data about a specific volume.
-
-* `ozone oz -infoVolume http://localhost:9880/hive -root`
-
-### List Volumes
-
-List volume command can be used by an administrator to list volumes of any user. It can also be used by a user to list the volumes they own.
-
-* `ozone oz -listVolume http://localhost:9880/ -user bilbo -root`
-
-The above command lists all volumes owned by user bilbo.
-
-Ozone Bucket Commands
---------------------
-
-Bucket commands follow a similar pattern to volume commands. However, bucket commands are designed to be run by the owner of the volume.
-The following examples assume that these commands are run by the owner of the volume or bucket.
-
-
-### Create Bucket
-
-Create bucket call allows the owner of a volume to create a bucket.
-
-* `ozone oz -createBucket http://localhost:9880/hive/january`
-
-This call creates a bucket called `january` in the volume called `hive`. If
-the volume does not exist, then this call will fail.
-
-
-### Update Bucket
-Updates bucket meta-data, like ACLs.
-
-* `ozone oz -updateBucket http://localhost:9880/hive/january  -addAcl
-user:spark:rw`
-
-### Delete Bucket
-Deletes a bucket if it is empty.
-
-* `ozone oz -deleteBucket http://localhost:9880/hive/january`
-
-### Info Bucket
-Returns information about a given bucket.
-
-* `ozone oz -infoBucket http://localhost:9880/hive/january`
-
-### List Buckets
-List buckets on a given volume.
-
-* `ozone oz -listBucket http://localhost:9880/hive`
-
-Ozone Key Commands
-------------------
-
-Ozone key commands allows users to put, delete and get keys from ozone buckets.
-
-### Put Key
-Creates or overwrites a key in the ozone store; -file points to the file you
-want to upload.
-
-* `ozone oz -putKey  http://localhost:9880/hive/january/processed.orc  -file
-processed.orc`
-
-### Get Key
-Downloads a file from the ozone bucket.
-
-* `ozone oz -getKey  http://localhost:9880/hive/january/processed.orc  -file
-  processed.orc.copy`
-
-### Delete Key
-Deletes a key from the ozone store.
-
-* `ozone oz -deleteKey http://localhost:9880/hive/january/processed.orc`
-
-### Info Key
-Reads key metadata from the ozone store.
-
-* `ozone oz -infoKey http://localhost:9880/hive/january/processed.orc`
-
-### List Keys
-List all keys in an ozone bucket.
-
-* `ozone oz -listKey  http://localhost:9880/hive/january`

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneGettingStarted.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneGettingStarted.md.vm b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneGettingStarted.md.vm
deleted file mode 100644
index 9e96098..0000000
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneGettingStarted.md.vm
+++ /dev/null
@@ -1,347 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-Ozone - Object store for Hadoop
-==============================
-
-Introduction
-------------
-Ozone is an object store for Hadoop. It is a redundant, distributed object
-store built by leveraging primitives present in HDFS. Ozone supports a REST
-API for accessing the store.
-
-Getting Started
----------------
-Ozone is a work in progress and currently lives in the hadoop source tree.
-The subprojects (ozone/hdds) are part of the hadoop source tree but are not
-compiled by default and are not part of the official releases. To
-use it, you have to build a package yourself and deploy a cluster.
-
-### Building Ozone
-
-To build Ozone, please check out the hadoop sources from github. Then
-check out the trunk branch and build it.
-
-`mvn clean package -DskipTests=true -Dmaven.javadoc.skip=true -Pdist -Phdds -Dtar -DskipShade`
-
-The skipShade flag just makes compilation faster and is not really required.
-
-This will give you a tarball in your distribution directory. This is the
-tarball that can be used for deploying your hadoop cluster. Here is an
-example of the tarball that will be generated.
-
-* `~/apache/hadoop/hadoop-dist/target/${project.version}.tar.gz`
-
-At this point we have an option to set up a physical cluster or run ozone via
-docker.
-
-Running Ozone via Docker
-------------------------
-
-This assumes that you have a running docker setup on the machine. Please run
-the following commands to see ozone in action.
-
- Go to the directory where the docker compose files exist.
-
-
- - `cd hadoop-dist/target/compose/ozone`
-
-Tell docker to start ozone; this will start a KSM, an SCM and a single datanode in
-the background.
-
-
- - `docker-compose up -d`
-
-Now let us run some workload against ozone; to do that, we will run freon.
-
-This will log into the datanode and run bash.
-
- - `docker-compose exec datanode bash`
-
-Now you can run the `ozone` command shell or freon, the ozone load generator.
-
-This is the command to run freon.
-
- - `ozone freon -mode offline -validateWrites -numOfVolumes 1 -numOfBuckets 10 -numOfKeys 100`
-
-You can check out the KSM UI to see the request information.
-
- - `http://localhost:9874/`
-
-If you need more datanodes, you can scale up:
-
- - `docker-compose scale datanode=3`
-
-Running Ozone using a real cluster
-----------------------------------
-
-Please proceed to set up a hadoop cluster by creating the hdfs-site.xml and
-other configuration files that are needed for your cluster.
-
-
-### Ozone Configuration
-
-Ozone relies on its own configuration file called `ozone-site.xml`. It is
-just for convenience and ease of management -- you can add these settings
-to `hdfs-site.xml`, if you don't want to keep ozone settings separate.
-This document refers to `ozone-site.xml` so that ozone settings are in one
-place and not mingled with HDFS settings.
-
- * _*ozone.enabled*_  This is the most important setting for ozone.
- Currently, Ozone is an opt-in subsystem of HDFS. By default, Ozone is
- disabled. Setting this flag to `true` enables ozone in the HDFS cluster.
- Here is an example,
-
-```
-    <property>
-       <name>ozone.enabled</name>
-       <value>True</value>
-    </property>
-```
- *  _*ozone.metadata.dirs*_ Ozone is designed with modern hardware
- in mind. It tries to use SSDs effectively. So users can specify where the
- metadata must reside. Usually you pick your fastest disk (SSDs if
- you have them on your nodes). KSM, SCM and datanodes will write the metadata
- to these disks. This is a required setting; if it is missing, Ozone will
- fail to come up. Here is an example,
-
-```
-   <property>
-      <name>ozone.metadata.dirs</name>
-      <value>/data/disk1/meta</value>
-   </property>
-```
-
-* _*ozone.scm.names*_ Ozone is built on top of the container framework. Storage
- Container Manager (SCM) is a distributed block service which is used by ozone
- and other storage services.
- This property allows datanodes to discover where SCM is, so that
- datanodes can send heartbeats to SCM. SCM is designed to be highly available
- and datanodes assume there are multiple instances of SCM which form a highly
- available ring. The HA feature of SCM is a work in progress. So we
- configure ozone.scm.names to be a single machine. Here is an example,
-
-```
-    <property>
-      <name>ozone.scm.names</name>
-      <value>scm.hadoop.apache.org</value>
-    </property>
-```
-
-* _*ozone.scm.datanode.id*_ Each datanode that speaks to SCM generates an ID,
-just like in HDFS. This is an optional setting. Please note:
-this path will be created by datanodes if it doesn't exist already. Here is an
- example,
-
-```
-   <property>
-      <name>ozone.scm.datanode.id</name>
-      <value>/data/disk1/scm/meta/node/datanode.id</value>
-   </property>
-```
-
-* _*ozone.scm.block.client.address*_ Storage Container Manager (SCM) offers a
- set of services that can be used to build a distributed storage system. One
- of the services offered is the block service. KSM and HDFS would use this
- service. This property describes where KSM can discover SCM's block service
- endpoint. There are corresponding ports etc., but assuming that we are using
- the default ports, the server address is the only required field. Here is an
- example,
-
-```
-    <property>
-      <name>ozone.scm.block.client.address</name>
-      <value>scm.hadoop.apache.org</value>
-    </property>
-```
-
-* _*ozone.ksm.address*_ KSM server address. This is used by Ozonehandler and
-Ozone File System.
-
-```
-    <property>
-       <name>ozone.ksm.address</name>
-       <value>ksm.hadoop.apache.org</value>
-    </property>
-```
-
-* _*dfs.datanode.plugins*_ Datanode service plugins: the container manager part
- of ozone is running inside the datanode as a service plugin. To activate ozone
- you should define the service plugin implementation class. **Important**
- It should be added to the **hdfs-site.xml** as the plugin should be activated
- as part of the normal HDFS Datanode bootstrap.
-
-```
-    <property>
-       <name>dfs.datanode.plugins</name>
-       <value>org.apache.hadoop.ozone.HddsDatanodeService</value>
-    </property>
-```
-
-Here is a quick summary of settings needed by Ozone.
-
-| Setting                        | Value                        | Comment |
-|--------------------------------|------------------------------|------------------------------------------------------------------|
-| ozone.enabled                  | True                         | This enables SCM and containers in the HDFS cluster.             |
-| ozone.metadata.dirs            | file path                    | The metadata will be stored here.                                |
-| ozone.scm.names                | SCM server name              | Hostname:port or IP:port address of SCM.                         |
-| ozone.scm.block.client.address | SCM server name and port     | Used by services like KSM                                        |
-| ozone.scm.client.address       | SCM server name and port     | Used by client side                                              |
-| ozone.scm.datanode.address     | SCM server name and port     | Used by datanode to talk to SCM                                  |
-| ozone.ksm.address              | KSM server name              | Used by Ozone handler and Ozone file system.                     |
-
- Here is a working example of`ozone-site.xml`.
-
-```
-    <?xml version="1.0" encoding="UTF-8"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-      <property>
-          <name>ozone.enabled</name>
-          <value>True</value>
-        </property>
-
-        <property>
-          <name>ozone.metadata.dirs</name>
-          <value>/data/disk1/ozone/meta</value>
-        </property>
-
-        <property>
-          <name>ozone.scm.names</name>
-          <value>127.0.0.1</value>
-        </property>
-
-        <property>
-           <name>ozone.scm.client.address</name>
-           <value>127.0.0.1:9860</value>
-        </property>
-
-         <property>
-           <name>ozone.scm.block.client.address</name>
-           <value>127.0.0.1:9863</value>
-         </property>
-
-         <property>
-           <name>ozone.scm.datanode.address</name>
-           <value>127.0.0.1:9861</value>
-         </property>
-
-         <property>
-           <name>ozone.ksm.address</name>
-           <value>127.0.0.1:9874</value>
-         </property>
-    </configuration>
-```
-
-And don't forget to enable the datanode component by adding the
-following configuration to hdfs-site.xml:
-
-```
-    <property>
-       <name>dfs.datanode.plugins</name>
-       <value>org.apache.hadoop.ozone.HddsDatanodeService</value>
-    </property>
-```
-
-### Starting Ozone
-
-Ozone is designed to run concurrently with HDFS. The simplest way to [start
-HDFS](../hadoop-common/ClusterSetup.html) is to run
-`$HADOOP/sbin/start-dfs.sh`. Once HDFS
-is running, please verify it is fully functional by running some commands like
-
-   - *./hdfs dfs -mkdir /usr*
-   - *./hdfs dfs -ls /*
-
- Once you are sure that HDFS is running, start Ozone. To start ozone, you
- need to start SCM and KSM. Currently we assume that both KSM and SCM
- are running on the same node; this will change in the future.
-
- The first time you bring up Ozone, SCM must be initialized.
-
-   - `./ozone scm -init`
-
- Start SCM.
-
-   - `./ozone --daemon start scm`
-
- Once SCM gets started, KSM must be initialized.
-
-   - `./ozone ksm -createObjectStore`
-
- Start KSM.
-
-   - `./ozone --daemon start ksm`
-
-If you would like to start HDFS and Ozone together, you can do that by running
- a single command.
- - `$HADOOP/sbin/start-ozone.sh`
-
- This command will start HDFS and then start the ozone components.
-
- Once you have ozone running you can use these ozone [shell](./OzoneCommandShell.html)
- commands to  create a  volume, bucket and keys.
-
-### Diagnosing issues
-
-Ozone tries not to pollute the existing HDFS streams of configuration and
-logging. So ozone logs are by default configured to be written to a file
-called `ozone.log`. This is controlled by the settings in the
-`log4j.properties` file in the hadoop configuration directory.
-
-Here are the log4j properties that are added by ozone.
-
-
-```
-   #
-   # Add a logger for ozone that is separate from the Datanode.
-   #
-   #log4j.debug=true
-   log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-   # Do not log into datanode logs. Remove this line to have single log.
-   log4j.additivity.org.apache.hadoop.ozone=false
-
-   # For development purposes, log both to console and log file.
-   log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-   log4j.appender.OZONE.Threshold=info
-   log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-   log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-    %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-   # Real ozone logger that writes to ozone.log
-   log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-   log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-   log4j.appender.FILE.Threshold=debug
-   log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-   log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-     (%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-      %m%n
-```
-
-If you would like to have a single datanode log instead of ozone output
-getting written to ozone.log, please remove the following line or set it to true.
-
- ` log4j.additivity.org.apache.hadoop.ozone=false`
-
-On the SCM/KSM side, you will be able to see
-
-  - `hadoop-hdfs-ksm-hostname.log`
-  - `hadoop-hdfs-scm-hostname.log`
-
-Please file any issues you see under the related issues:
-
- - [Object store in HDFS: HDFS-7240](https://issues.apache.org/jira/browse/HDFS-7240)
- - [Ozone File System: HDFS-13074](https://issues.apache.org/jira/browse/HDFS-13074)
- - [Building HDFS on top of new storage layer (HDDS): HDFS-10419](https://issues.apache.org/jira/browse/HDFS-10419)
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneMetrics.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneMetrics.md b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneMetrics.md
deleted file mode 100644
index f5eccf6..0000000
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneMetrics.md
+++ /dev/null
@@ -1,166 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-
-
-HDFS Ozone Metrics
-===============
-
-<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
-
-Overview
---------
-
-The container metrics that are used in HDFS Ozone.
-
-### Storage Container Metrics
-
-The metrics for various storage container operations in HDFS Ozone.
-
-The storage container is an optional service that can be enabled by setting
-'ozone.enabled' to true.
-These metrics are only available when ozone is enabled.
-
-Storage Container Metrics maintains a set of generic metrics for all
-container RPC calls that can be made to a datanode/container.
-
-Along with the total number of RPC calls, containers maintain a set of metrics
-for each RPC call. The following is the set of counters maintained for each RPC
-operation.
-
-*Total number of operations* - We maintain an array which counts how
-many times a specific operation has been performed.
-E.g. `NumCreateContainer` tells us how many times create container has been
-invoked on this datanode.
-
-*Total number of pending operations* - This is an array which counts how
-many times a specific operation is waiting to be processed from the client
-point of view.
-E.g. `NumPendingCreateContainer` tells us how many create container requests
-are waiting to be processed.
-
-*Average latency of each pending operation in nanoseconds* - The average latency
-of the operation from the client point of view.
-E.g. `CreateContainerLatencyAvgTime` - This tells us the average latency of
-Create Container from the client point of view.
-
-*Number of bytes involved in a specific command* - This is an array that is
-maintained for all operations, but makes sense only for read and write
-operations.
-
-While it is possible to read the bytes in update container, it really makes
-no sense, since no data stream is involved. Users are advised to use this
-metric only when it makes sense. E.g. `BytesReadChunk` -- tells us how
-many bytes have been read from this datanode using the Read Chunk operation.
-
-*Average Latency of each operation* - The average latency of the operation.
-E.g. `LatencyCreateContainerAvgTime` - This tells us the average latency of
-Create Container.
-
-*Quantiles for each of these operations* - The 50/75/90/95/99th percentile
-of these operations. E.g. `CreateContainerNanos60s50thPercentileLatency`
-gives the latency of create container operations at the 50th percentile
-(1-minute granularity). We report the 50th, 75th, 90th, 95th and 99th
-percentiles for all RPCs.
-
-Each container therefore reports these counters for each of these
-RPC operations.
-
-| Name | Description |
-|:---- |:---- |
-| `NumOps` | Total number of container operations |
-| `CreateContainer` | Create container operation |
-| `ReadContainer` | Read container operation |
-| `UpdateContainer` | Update container operations |
-| `DeleteContainer` | Delete container operations |
-| `ListContainer` | List container operations |
-| `PutKey` | Put key operations |
-| `GetKey` | Get key operations |
-| `DeleteKey` | Delete key operations |
-| `ListKey` | List key operations |
-| `ReadChunk` | Read chunk operations |
-| `DeleteChunk` | Delete chunk operations |
-| `WriteChunk` | Write chunk operations|
-| `ListChunk` | List chunk operations |
-| `CompactChunk` | Compact chunk operations |
-| `PutSmallFile` | Put small file operations |
-| `GetSmallFile` | Get small file operations |
-| `CloseContainer` | Close container operations |
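-
-As a minimal sketch of how these counters can be read: like other Hadoop
-daemons, the datanode exposes its metrics through the standard `/jmx`
-servlet on its HTTP address (the host, port and grep pattern below are
-illustrative assumptions, not fixed values):
-
-    # host/port are placeholders; filter a couple of the container counters
-    curl -s "http://<datanode-host>:<http-port>/jmx" | grep -E "NumCreateContainer|BytesReadChunk"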
-
-### Storage Container Manager Metrics
-
-The metrics for containers that are managed by the Storage Container Manager.
-
-Storage Container Manager (SCM) is a master service which keeps track of
-replicas of storage containers. It also manages all data nodes and their
-states, dealing with container reports and dispatching commands for execution.
-
-Following are the counters for containers:
-
-| Name | Description |
-|:---- |:---- |
-| `LastContainerReportSize` | Total size in bytes of all containers in the latest container report that SCM received from a datanode |
-| `LastContainerReportUsed` | Total number of bytes used by all containers in the latest container report that SCM received from a datanode |
-| `LastContainerReportKeyCount` | Total number of keys in all containers in the latest container report that SCM received from a datanode |
-| `LastContainerReportReadBytes` | Total number of bytes read from all containers in the latest container report that SCM received from a datanode |
-| `LastContainerReportWriteBytes` | Total number of bytes written into all containers in the latest container report that SCM received from a datanode |
-| `LastContainerReportReadCount` | Total number of times containers have been read from in the latest container report that SCM received from a datanode |
-| `LastContainerReportWriteCount` | Total number of times containers have been written to in the latest container report that SCM received from a datanode |
-| `ContainerReportSize` | Total size in bytes of all containers over the whole cluster |
-| `ContainerReportUsed` | Total number of bytes used by all containers over the whole cluster |
-| `ContainerReportKeyCount` | Total number of keys in all containers over the whole cluster |
-| `ContainerReportReadBytes` | Total number of bytes read from all containers over the whole cluster |
-| `ContainerReportWriteBytes` | Total number of bytes written into all containers over the whole cluster |
-| `ContainerReportReadCount` | Total number of times containers have been read from over the whole cluster |
-| `ContainerReportWriteCount` | Total number of times containers have been written to over the whole cluster |
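-
-A similar sketch applies to SCM: its HTTP server also serves the `/jmx`
-servlet, and the servlet's `qry` parameter can narrow the output to a
-single metrics bean (the host, port and bean name below are illustrative
-assumptions):
-
-    # the bean name is a guess for illustration; adjust to what /jmx actually lists
-    curl -s "http://<scm-host>:<http-port>/jmx?qry=Hadoop:service=StorageContainerManager,name=*"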
-
-### Key Space Metrics
-
-The metrics for various key space manager operations in HDFS Ozone.
-
-Key Space Manager (KSM) is a service similar to the Namenode in HDFS.
-In the current design of KSM, it maintains the metadata of all volumes, buckets and keys.
-These metrics are only available when ozone is enabled.
-
-The following counters are maintained for each key space operation.
-
-*Total number of operations* - We maintain an array which counts how
-many times a specific operation has been performed.
-E.g. `NumVolumeCreate` tells us how many times create volume has been
-invoked in KSM.
-
-*Total number of failed operations* - This counter is the counterpart of the
-one above: it counts failures.
-E.g. `NumVolumeCreateFails` tells us how many times create volume invocations
-have failed in KSM.
-
-Following are the counters for each of the key space operations.
-
-| Name | Description |
-|:---- |:---- |
-| `VolumeCreate` | Create volume operation |
-| `VolumeUpdates` | Update volume property operation |
-| `VolumeInfos` | Get volume information operation |
-| `VolumeCheckAccesses` | Check volume access operation |
-| `VolumeDeletes` | Delete volume operation |
-| `VolumeLists` | List volume operation |
-| `BucketCreates` | Create bucket operation |
-| `BucketInfos` | Get bucket information operation |
-| `BucketUpdates` | Update bucket property operation |
-| `BucketDeletes` | Delete bucket operation |
-| `BucketLists` | List bucket operation |
-| `KeyAllocate` | Allocate key operation |
-| `KeyLookup` | Look up key operation |
-| `KeyDeletes` | Delete key operation |
-| `KeyLists` | List key operation |
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneOverview.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneOverview.md b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneOverview.md
deleted file mode 100644
index 41d7dbd..0000000
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneOverview.md
+++ /dev/null
@@ -1,88 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-Ozone Overview
-==============
-
-
-Ozone is an Object store for Apache Hadoop. It aims to scale to billions of
-keys. The following is a high-level overview of the core components of Ozone.
-
-![Ozone Architecture Overview](images/ozoneoverview.png)
-
-The main elements of Ozone are:
-
-### Clients
-Ozone ships with a set of ready-made clients. They are the Ozone CLI and Freon.
-
-    * [Ozone CLI](./OzoneCommandShell.html) is the command line interface, like the 'hdfs' command.
-
-    * Freon is a load generation tool for Ozone.
-
-### REST Handler
-Ozone provides both an RPC (Remote Procedure Call) and a REST
-(Representational State Transfer) style interface. This allows clients to be
-written in many languages quickly. Ozone strives to maintain a similar
-interface between REST and RPC. The REST handler offers the REST protocol
-services of Ozone.
-
-For most purposes, a client can make a one-line change to switch from REST to
-RPC or vice versa.
-
-### Ozone File System
-Ozone file system (TODO: Add documentation) is a Hadoop-compatible file system.
-This is the most important user-visible component of Ozone.
-It allows Hadoop services and applications like Hive/Spark to run against
-Ozone without any change.
-
-### Ozone Client
-This is like DFSClient in HDFS. It acts as the standard client to talk to
-Ozone. All other components that we have discussed so far rely on the Ozone
-client (TODO: Add Ozone client documentation).
-
-### Key Space Manager
-
-Key Space Manager (KSM) takes care of Ozone's namespace.
-All Ozone entities like volumes, buckets and keys are managed by KSM
-(TODO: Add KSM documentation). In short, KSM is the metadata manager for Ozone.
-KSM talks to the block manager (SCM) to get blocks and passes them on to the
-Ozone client, which writes data to these blocks.
-KSM will eventually be replicated via Apache Ratis for High Availability.
-
-### Storage Container Manager
-Storage Container Manager (SCM) is the block and cluster manager for Ozone.
-SCM, along with the data nodes, offers a service called 'containers'.
-A container is a group of unrelated blocks that are managed together
-as a single entity.
-
-SCM offers the following abstractions.
-
-![SCM Abstractions](images/scmservices.png)
-#### Blocks
-Blocks are like blocks in HDFS. They are a replicated store of data.
-
-#### Containers
-A collection of blocks replicated and managed together.
-
-#### Pipelines
-SCM allows each container to choose its method of replication.
-For example, a container might decide that it needs only one copy of a block
-and might choose a stand-alone pipeline. Another container might want to have
-a very high level of reliability and pick a RATIS-based pipeline. In other
-words, SCM allows different kinds of replication strategies to co-exist.
-
-#### Pools
-A group of data nodes is called a pool. For scaling purposes,
-we define a pool as a set of machines. This makes management of datanodes
-easier.
-
-#### Nodes
-The data node where data is stored.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneRest.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneRest.md b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneRest.md
deleted file mode 100644
index 13fe00d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneRest.md
+++ /dev/null
@@ -1,549 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-Ozone REST APIs
-===================
-
-<!-- MACRO{toc|fromDepth=0|toDepth=1} -->
-
-Overview
---------
-
-The Ozone REST APIs allow users to access Ozone via the REST protocol.
-
-Authentication and Authorization
---------------------
-
-For the time being, the default authentication mode of the REST API is an
-insecure access mode, the *Simple* mode. Under this mode, the Ozone server
-trusts the user name specified by the client and does not perform any
-authentication.
-
-The user name can be specified in an HTTP header by
-
-* `x-ozone-user: {USER_NAME}`
-
-For example, if the header *x-ozone-user: bilbo* is added to the HTTP request,
-then the operation is executed as user *bilbo*.
-In *Simple* mode there is no real authorization either. A client can be
-authorized to obtain administrator privilege by using the HTTP header
-
-* `Authorization: {AUTH_METHOD} {SIGNATURE}`
-
-For example, if the header *Authorization: OZONE root* is set in the HTTP
-request, then Ozone authorizes the client with administrator privilege.
-
-Common REST Headers
---------------------
-
-The following HTTP headers must be set for each REST call.
-
-| Property | Description |
-|:---- |:----
-| Authorization | The authorization field determines which authentication method is used by Ozone. Currently only *simple* mode is supported; the corresponding value is *OZONE*. Optionally a user name can be set as *OZONE {USER_NAME}* to authorize as a particular user. |
-| Date | Standard HTTP header that represents dates. The format is day of the week, month, day, year and time (military time format) in GMT. Any other time zone will be rejected by the Ozone server. E.g. *Date : Mon, Apr 4, 2016 06:22:00 GMT*. This field is required. |
-| x-ozone-version | A required HTTP header indicating which version of the API this call is using. E.g. *x-ozone-version: v1*. Currently Ozone only publishes the v1 API. |
-
-Common Reply Headers
---------------------
-
-The common reply headers are part of all Ozone server replies.
-
-| Property | Description |
-|:---- |:----
-| Date | This is the HTTP date header and it is set to the server's local time expressed in GMT. |
-| x-ozone-request-id | This is a UUID string that represents a unique request ID. This ID is used to track the request through the Ozone system and is useful for debugging purposes. |
-| x-ozone-server-name | Fully qualified domain name of the server which handled the request. |
-
-Volume APIs
---------------------
-
-### Create a Volume
-
-This API allows admins to create a new storage volume.
-
-Schema:
-
-- `POST /{volume}?quota=<VOLUME_QUOTA>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| quota | long<BYTES \| MB \| GB \| TB> | Optional. Quota size in BYTEs, MBs, GBs or TBs |
-
-Sample HTTP POST request:
-
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" "http://localhost:9880/volume-to-create"
-
-This request creates a volume as user *bilbo*. The authorization field is set to *OZONE root* because this call requires administration privilege. The client receives a response with zero content length.
-
-    HTTP/1.1 201 Created
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 2173deb5-bbb7-4f0a-8236-f354784e3bae
-    Date: Tue, 27 Jun 2017 07:42:04 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
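-
-The optional *quota* parameter from the table above can be combined with the
-same call; a sketch of creating a volume with a quota (the size value is
-illustrative):
-
-    # the quota value below is only an example of the long<BYTES|MB|GB|TB> form
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" "http://localhost:9880/volume-to-create?quota=10GB"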
-
-### Update Volume
-
-This API allows administrators to update volume info such as ownership and quota. This API requires administration privilege.
-
-Schema:
-
-- `PUT /{volume}?quota=<VOLUME_QUOTA>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| quota | long<BYTES \| MB \| GB \| TB>  \| remove | Optional. Quota size in BYTEs, MBs, GBs or TBs. Or use string value *remove* to remove an existing quota for a volume. |
-
-Sample HTTP PUT request:
-
-    curl -X PUT -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: john"  http://localhost:9880/volume-to-update
-
-This request modifies the owner of */volume-to-update* to *john*.
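-
-Since the table above also accepts *quota=remove*, a sketch of dropping an
-existing quota through the same endpoint:
-
-    # quota=remove is the documented string value for removing a quota
-    curl -X PUT -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: john" "http://localhost:9880/volume-to-update?quota=remove"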
-
-### Delete Volume
-
-This API allows a user to delete a volume they own, if the volume is empty. Administrators can delete volumes owned by any user.
-
-Schema:
-
-- `DELETE /{volume}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: bilbo"  http://localhost:9880/volume-to-delete
-
-This request deletes the empty volume */volume-to-delete*. The client receives a zero-length content response.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 6af14c64-e3a9-40fe-9634-df60b7cbbc6a
-    Date: Tue, 27 Jun 2017 08:49:52 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Info Volume
-
-This API allows a user to read the info of a volume they own. Administrators can read the info of volumes owned by any user.
-
-Schema:
-
-- `GET /{volume}?info=volume`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | "volume" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo?info=volume"
-
-This request gets the info of volume */volume-of-bilbo*. The client receives a response with a JSON object of volume info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: a2224806-beaf-42dd-a68e-533cd7508f74
-    Date: Tue, 27 Jun 2017 07:55:35 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 171
-    Connection: keep-alive
-
-    {
-      "owner" : { "name" : "bilbo" },
-      "quota" : { "unit" : "TB", "size" : 1048576 },
-      "volumeName" : "volume-of-bilbo",
-      "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
-      "createdBy" : "root"
-    }
-
-### List Volumes
-
-This API allows a user to list all volumes they own. Administrators can list all volumes owned by any user.
-
-Schema:
-
-- `GET /?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_VOLUME_KEY>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only volumes with this prefix are included in the result. |
-| max-keys | int | Optional. Maximum number of volumes included in the result. Default is 1024 if not specified. |
-| prev-key | string | Optional. Volume name from which the listing should start; this key is excluded from the result. It must be a valid volume name. |
-| root-scan | bool | Optional. List all volumes in the cluster if this is set to true. Default false. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/?max-keys=100&prefix=Jan"
-
-This request gets all volumes owned by *bilbo* whose names contain the prefix *Jan*; the result contains at most *100* entries. The client receives a list of JSON objects, each of which describes the info of a volume.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 7fa0dce1-a8bd-4387-bc3c-1dac4b710bb1
-    Date: Tue, 27 Jun 2017 08:07:04 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 602
-    Connection: keep-alive
-
-    {
-      "volumes" : [
-        {
-          "owner" : { "name" : "bilbo"},
-          "quota" : { "unit" : "TB", "size" : 2 },
-          "volumeName" : "Jan-vol1",
-          "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
-          "createdBy" : root
-      },
-      ...
-      ]
-    }
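-
-When more volumes match than *max-keys* allows, the *prev-key* parameter from
-the table above pages through the results; a sketch of fetching the next page
-(the volume name is illustrative):
-
-    # prev-key picks up where the previous page ended; Jan-vol1 is an example name
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/?max-keys=100&prefix=Jan&prev-key=Jan-vol1"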
-
-Bucket APIs
---------------------
-
-### Create Bucket
-
-This API allows a user to create a bucket in a volume.
-
-Schema:
-
-- `POST /{volume}/{bucket}`
-
-Additional HTTP Headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| x-ozone-acl | ozone ACLs | Optional. Ozone ACLs. |
-| x-ozone-storage-class | <DEFAULT \| ARCHIVE \| DISK \| RAM_DISK \| SSD > | Optional. Storage type for the bucket. |
-| x-ozone-bucket-versioning | enabled/disabled | Optional. Whether to enable bucket versioning. |
-
-Sample HTTP POST request:
-
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" http://localhost:9880/volume-of-bilbo/bucket-0
-
-This request creates a bucket *bucket-0* under volume *volume-of-bilbo*.
-
-    HTTP/1.1 201 Created
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 49acfeec-4c85-470a-872b-2eaebd8d751e
-    Date: Tue, 27 Jun 2017 08:55:25 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
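-
-The optional headers from the table above can be combined on the same call; a
-sketch of creating a versioned bucket on SSD storage (the bucket name is
-illustrative):
-
-    # header values are taken from the table above; bucket-1 is an example name
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" -H "x-ozone-storage-class: SSD" -H "x-ozone-bucket-versioning: enabled" http://localhost:9880/volume-of-bilbo/bucket-1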
-
-### Update Bucket
-
-Updates bucket meta-data, like ACLs.
-
-Schema:
-
-- `PUT /{volume}/{bucket}`
-
-Additional HTTP Headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| x-ozone-acl | ozone ACLs | Optional. Ozone ACLs. |
-| x-ozone-bucket-versioning | enabled/disabled | Optional. Whether to enable bucket versioning. |
-
-Sample HTTP PUT request:
-
-    curl -i -X PUT -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" -H "x-ozone-acl: ADD user:peregrin:rw" http://localhost:9880/volume-of-bilbo/bucket-to-update
-
-This request adds the ACL policy specified by the HTTP header *x-ozone-acl* to bucket */volume-of-bilbo/bucket-to-update*; the ACL field *ADD user:peregrin:rw* grants additional read/write permission on this bucket to user *peregrin*.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: b061a295-5faf-4b98-94b9-8b3e87c8eb5e
-    Date: Tue, 27 Jun 2017 09:02:37 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Delete Bucket
-
-Deletes a bucket if it is empty. A user can only delete buckets they own; administrators can delete buckets owned by any user, as long as the bucket is empty.
-
-Schema:
-
-- `DELETE /{volume}/{bucket}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0"
-
-This request deletes bucket */volume-of-bilbo/bucket-0*. The client receives a zero-length content response.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f57acd7a-2116-4c2f-aa2f-5a483db81c9c
-    Date: Tue, 27 Jun 2017 09:16:52 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-
-### Info Bucket
-
-This API returns information about a given bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}?info=bucket`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | "bucket" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0?info=bucket"
-
-This request gets the info of bucket */volume-of-bilbo/bucket-0*. The client receives a response containing a JSON object with the bucket info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f125485b-8cae-4c7f-a2d6-5b1fefd6f193
-    Date: Tue, 27 Jun 2017 09:08:31 GMT
-    Content-Type: application/json
-    Content-Length: 138
-    Connection: keep-alive
-
-    {
-      "volumeName" : "volume-of-bilbo",
-      "bucketName" : "bucket-0",
-      "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
-      "acls" : [ ],
-      "versioning" : "DISABLED",
-      "storageType" : "DISK"
-    }
-
-### List Buckets
-
-List buckets in a given volume.
-
-Schema:
-
-- `GET /{volume}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_BUCKET_KEY>`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only buckets with this prefix are included in the result. |
-| max-keys | int | Optional. Maximum number of buckets included in the result. Default is 1024 if not specified. |
-| prev-key | string | Optional. Bucket name from which the listing should start; this key is excluded from the result. It must be a valid bucket name. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo?max-keys=10"
-
-This request lists the buckets under volume *volume-of-bilbo*; the result contains at most 10 entries. The client receives a response containing an array of JSON objects, each of which represents the info of a bucket.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: e048c3d5-169c-470f-9903-632d9f9e32d5
-    Date: Tue, 27 Jun 2017 09:12:18 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 207
-    Connection: keep-alive
-
-    {
-      "buckets" : [ {
-        "volumeName" : "volume-of-bilbo",
-        "bucketName" : "bucket-0",
-        "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
-        "acls" : [ ],
-        "versioning" : null,
-        "storageType" : "DISK",
-        "bytesUsed" : 0,
-        "keyCount" : 0
-        },
-        ...
-      ]
-    }
-
-Key APIs
-------------------
-
-### Put Key
-
-This API allows a user to create or overwrite keys inside a bucket.
-
-Schema:
-
-- `PUT /{volume}/{bucket}/{key}`
-
-Additional HTTP headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| Content-MD5 | MD5 digest | Standard HTTP header, file hash. |
-
-Sample PUT HTTP request:
-
-    curl -X PUT -T /path/to/localfile -H "Authorization:OZONE" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
-
-This request uploads the local file */path/to/localfile* specified by the *-T* option to Ozone as user *bilbo*, mapped to the Ozone key */volume-of-bilbo/bucket-0/file-0*. The client receives a zero-length content response.
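-
-The *Content-MD5* header from the table above carries the file hash; a sketch
-of computing it inline with openssl (assuming a POSIX shell):
-
-    # Content-MD5 is the base64 of the binary MD5 digest, per the HTTP standard
-    curl -X PUT -T /path/to/localfile -H "Content-MD5: $(openssl md5 -binary /path/to/localfile | base64)" -H "Authorization:OZONE" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"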
-
-### Get Key
-
-This API allows a user to get or download a key from an Ozone bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}`
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
-
-This request reads the content of key */volume-of-bilbo/bucket-0/file-0*. If the content of the file is plain text, it can be directly dumped onto stdout.
-
-    HTTP/1.1 200 OK
-    Content-Type: application/octet-stream
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 1bcd7de7-d8e3-46bb-afee-bdc933d383b8
-    Date: Tue, 27 Jun 2017 09:35:29 GMT
-    Content-Length: 12
-    Connection: keep-alive
-
-    Hello Ozone!
-
-If the file is not plain text, specify the *-O* option in the curl command and the file will be downloaded into the current working directory; the file name will be the same as the key. A sample request looks like the following:
-
-    curl -O -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-1"
-
-The response looks like the following:
-
-    % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
-                                 Dload  Upload   Total   Spent    Left  Speed
-    100 6148k  100 6148k    0     0  24.0M      0 --:--:-- --:--:-- --:--:-- 24.1M
-
-### Delete Key
-
-This API allows a user to delete a key from a bucket.
-
-Schema:
-
-- `DELETE /{volume}/{bucket}/{key}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
-
-This request deletes key */volume-of-bilbo/bucket-0/file-0*. The client receives a zero-length content result:
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f8c4a373-dd5f-4e3a-b6c4-ddf7e191fe91
-    Date: Tue, 27 Jun 2017 14:19:48 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Info Key
-
-This API returns information about a given key.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}?info=key`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | String, "key" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0?info=key"
-
-This request returns information about the key */volume-of-bilbo/bucket-0/file-0*. The client receives a JSON object listing the attributes of the key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: c674343c-a0f2-49e4-bbd6-daa73e7dc131
-    Date: Mon, 03 Jul 2017 14:28:45 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 73
-    Connection: keep-alive
-
-    {
-      "version" : 0,
-      "md5hash" : null,
-      "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "size" : 0,
-      "keyName" : "file-0"
-    }
-
-### List Keys
-
-This API allows a user to list keys in a bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_KEY>`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only keys with this prefix are included in the result. |
-| max-keys | int | Optional. Maximum number of keys included in the result. Default is 1024 if not specified. |
-| prev-key | string | Optional. Key name from which the listing should start; this key is excluded from the result. It must be a valid key name. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http:/localhost:9880/volume-of-bilbo/bucket-0/?max-keys=100&prefix=file"
-
-This request lists keys under bucket */volume-of-bilbo/bucket-0*; the listing result is filtered by the prefix *file*. The client receives an array of JSON objects, each of which represents the info of a matched key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 7f9fc970-9904-4c56-b671-83a086c6f555
-    Date: Tue, 27 Jun 2017 09:48:59 GMT
-    Content-Type: application/json
-    Content-Length: 209
-    Connection: keep-alive
-
-    {
-      "name" : null,
-      "prefix" : file,
-      "maxKeys" : 0,
-      "truncated" : false,
-      "keyList" : [ {
-          "version" : 0,
-          "md5hash" : null,
-          "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-          "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-          "size" : 0,
-          "keyName" : "file-0"
-          },
-          ...
-       ]
-    }
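-
-As with volumes and buckets, the *prev-key* parameter pages through long
-listings; a sketch of fetching the next page after *file-0* (the key name is
-illustrative):
-
-    # prev-key excludes the named key and continues the listing after it
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/?max-keys=100&prefix=file&prev-key=file-0"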

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 6687382..f605da2 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -36,6 +36,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>tools</module>
     <module>integration-test</module>
     <module>objectstore-service</module>
+    <module>docs</module>
   </modules>
 
   <dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index bcb816e..a916108 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -573,6 +573,11 @@
         <artifactId>hadoop-ozone-objectstore-service</artifactId>
         <version>${hdds.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-docs</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>




[46/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
new file mode 100644
index 0000000..6c7fa40
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["less/normalize.less","less/print.less","bootstrap.css","dist/css/bootstrap.css","less/glyphicons.less","less/scaffolding.less","less/mixins/vendor-prefixes.less","less/mixins/tab-focus.less","less/mixins/image.less","less/type.less","less/mixins/text-emphasis.less","less/mixins/background-variant.less","less/mixins/text-overflow.less","less/code.less","less/grid.less","less/mixins/grid.less","less/mixins/grid-framework.less","less/tables.less","less/mixins/table-row.less","less/forms.less","less/mixins/forms.less","less/buttons.less","less/mixins/buttons.less","less/mixins/opacity.less","less/component-animations.less","less/dropdowns.less","less/mixins/nav-divider.less","less/mixins/reset-filter.less","less/button-groups.less","less/mixins/border-radius.less","less/input-groups.less","less/navs.less","less/navbar.less","less/mixins/nav-vertical-align.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/mixins/pagination.less","le
 ss/pager.less","less/labels.less","less/mixins/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/mixins/alerts.less","less/progress-bars.less","less/mixins/gradients.less","less/mixins/progress-bar.less","less/media.less","less/list-group.less","less/mixins/list-group.less","less/panels.less","less/mixins/panels.less","less/responsive-embed.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/mixins/reset-text.less","less/popovers.less","less/carousel.less","less/mixins/clearfix.less","less/mixins/center-block.less","less/mixins/hide-text.less","less/responsive-utilities.less","less/mixins/responsive-visibility.less"],"names":[],"mappings":";;;;4EAQA,KACE,YAAA,WACA,yBAAA,KACA,qBAAA,KAOF,KACE,OAAA,EAaF,QAAA,MAAA,QAAA,WAAA,OAAA,OAAA,OAAA,OAAA,KAAA,KAAA,IAAA,QAAA,QAaE,QAAA,MAQF,MAAA,OAAA,SAAA,MAIE,QAAA,aACA,eAAA,SAQF,sBACE,QAAA,KACA,OAAA,EAQF,SAAA,SAEE,QAAA,KAUF,EACE,iBAAA,YAQF,SAAA,QAEE,QAAA,EAUF,YAC
 E,cAAA,IAAA,OAOF,EAAA,OAEE,YAAA,IAOF,IACE,WAAA,OAQF,GACE,OAAA,MAAA,EACA,UAAA,IAOF,KACE,MAAA,KACA,WAAA,KAOF,MACE,UAAA,IAOF,IAAA,IAEE,SAAA,SACA,UAAA,IACA,YAAA,EACA,eAAA,SAGF,IACE,IAAA,MAGF,IACE,OAAA,OAUF,IACE,OAAA,EAOF,eACE,SAAA,OAUF,OACE,OAAA,IAAA,KAOF,GACE,OAAA,EAAA,mBAAA,YAAA,gBAAA,YACA,WAAA,YAOF,IACE,SAAA,KAOF,KAAA,IAAA,IAAA,KAIE,YAAA,UAAA,UACA,UAAA,IAkBF,OAAA,MAAA,SAAA,OAAA,SAKE,OAAA,EACA,KAAA,QACA,MAAA,QAOF,OACE,SAAA,QAUF,OAAA,OAEE,eAAA,KAWF,OAAA,wBAAA,kBAAA,mBAIE,mBAAA,OACA,OAAA,QAOF,iBAAA,qBAEE,OAAA,QAOF,yBAAA,wBAEE,QAAA,EACA,OAAA,EAQF,MACE,YAAA,OAWF,qBAAA,kBAEE,mBAAA,WAAA,gBAAA,WAAA,WAAA,WACA,QAAA,EASF,8CAAA,8CAEE,OAAA,KAQF,mBACE,mBAAA,YACA,gBAAA,YAAA,WAAA,YAAA,mBAAA,UASF,iDAAA,8CAEE,mBAAA,KAOF,SACE,QAAA,MAAA,OAAA,MACA,OAAA,EAAA,IACA,OAAA,IAAA,MAAA,OAQF,OACE,QAAA,EACA,OAAA,EAOF,SACE,SAAA,KAQF,SACE,YAAA,IAUF,MACE,eAAA,EACA,gBAAA,SAGF,GAAA,GAEE,QAAA,uFCjUF,aA7FI,EAAA,OAAA,QAGI,MAAA,eACA,YAAA,eACA,WAAA,cAAA,mBAAA,eACA,WAAA,eAGJ,EAAA,UAEI,gBAAA,UAGJ,cACI,QAAA,KAAA,WAAA,IAGJ,kBACI
 ,QAAA,KAAA,YAAA,IAKJ,6BAAA,mBAEI,QAAA,GAGJ,WAAA,IAEI,OAAA,IAAA,MAAA,KC4KL,kBAAA,MDvKK,MC0KL,QAAA,mBDrKK,IE8KN,GDLC,kBAAA,MDrKK,ICwKL,UAAA,eCUD,GF5KM,GE2KN,EF1KM,QAAA,ECuKL,OAAA,ECSD,GF3KM,GCsKL,iBAAA,MD/JK,QCkKL,QAAA,KCSD,YFtKU,oBCiKT,iBAAA,eD7JK,OCgKL,OAAA,IAAA,MAAA,KD5JK,OC+JL,gBAAA,mBCSD,UFpKU,UC+JT,iBAAA,eDzJS,mBEkKV,mBDLC,OAAA,IAAA,MAAA,gBEjPD,WACA,YAAA,uBFsPD,IAAA,+CE7OC,IAAK,sDAAuD,4BAA6B,iDAAkD,gBAAiB,gDAAiD,eAAgB,+CAAgD,mBAAoB,2EAA4E,cAE7W,WACA,SAAA,SACA,IAAA,IACA,QAAA,aACA,YAAA,uBACA,WAAA,OACA,YAAA,IACA,YAAA,EAIkC,uBAAA,YAAW,wBAAA,UACX,2BAAW,QAAA,QAEX,uBDuPlC,QAAS,QCtPyB,sBFiPnC,uBEjP8C,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,qBAAW,QAAA,QACX,0BAAW,QAAA,QACX,qBAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,sBAAW,QAAA,QACX,yBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX
 ,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,+BAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,gCAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,gCAAW,QAAA,QACX,gCAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,0BAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAA
 W,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,gCAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,6BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,mCAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,yBAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,gCAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,sBAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,0BAAW,QAAA,QACX,4BAAW,QAAA,QACX,qCAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,oCAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,8BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,uBAAW,QAAA,QACX,mCAAW,QAA
 A,QACX,uCAAW,QAAA,QACX,gCAAW,QAAA,QACX,oCAAW,QAAA,QACX,qCAAW,QAAA,QACX,yCAAW,QAAA,QACX,4BAAW,QAAA,QACX,yBAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,yBAAW,QAAA,QACX,wBAAW,QAAA,QACX,0BAAW,QAAA,QACX,6BAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,uBAAW,QAAA,QACX,8BAAW,QAAA,QACX,+BAAW,QAAA,QACX,gCAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,8BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,yBAAW,QAAA,QACX,6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,2BAAW,QAAA,QACX,2BAAW,QAAA,QACX,4BAAW,QAAA,QACX,+BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,iCAAW,QAAA,QACX,oCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,+BAAW,QAAA,QACX,iCAAW,QAAA,QACX,qBAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,2BAAW,QAAA,QACX,uBAAW,QAAA,QASX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,wBAAW,QAAA,QACX,uBAAW,QAAA,QACX,yBAAW,QAAA,QACX,yBAAW,QAAA,QACX,+BAAW,QAAA,QAC
 X,uBAAW,QAAA,QACX,6BAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,uBAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,2BAAW,QAAA,QACX,0BAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,4BAAW,QAAA,QACX,mCAAW,QAAA,QACX,4BAAW,QAAA,QACX,oCAAW,QAAA,QACX,kCAAW,QAAA,QACX,iCAAW,QAAA,QACX,+BAAW,QAAA,QACX,sBAAW,QAAA,QACX,wBAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,kCAAW,QAAA,QACX,mCAAW,QAAA,QACX,sCAAW,QAAA,QACX,0CAAW,QAAA,QACX,oCAAW,QAAA,QACX,wCAAW,QAAA,QACX,qCAAW,QAAA,QACX,iCAAW,QAAA,QACX,gCAAW,QAAA,QACX,kCAAW,QAAA,QACX,+BAAW,QAAA,QACX,0BAAW,QAAA,QACX,8BAAW,QAAA,QACX,4BAAW,QAAA,QACX,4BAAW,QAAA,QACX,6BAAW,QAAA,QACX,4BAAW,QAAA,QCtS/C,0BCgEE,QAAA,QHi+BF,EDNC,mBAAA,WGxhCI,gBAAiB,WFiiCZ,WAAY,WGl+BZ,OADL,QJg+BJ,mBAAA,WGthCI,gBAAiB,WACpB,WAAA,WHyhCD,KGrhCC,UAAW,KAEX,4BAAA,cAEA,KACA,YAAA,iBAAA,UAAA,MAAA,WHuhCD,UAAA,KGnhCC,YAAa,WF4hCb,MAAO,KACP,iBAAkB,KExhClB,OADA,MAEA,OHqhCD,S
 G/gCC,YAAa,QACb,UAAA,QACA,YAAA,QAEA,EFwhCA,MAAO,QEthCL,gBAAA,KAIF,QH8gCD,QKjkCC,MAAA,QACA,gBAAA,UF6DF,QACE,QAAA,IAAA,KAAA,yBHygCD,eAAA,KGlgCC,OHqgCD,OAAA,ECSD,IACE,eAAgB,ODDjB,4BM/kCC,0BLklCF,gBKnlCE,iBADA,eH4EA,QAAS,MACT,UAAA,KHugCD,OAAA,KGhgCC,aACA,cAAA,IAEA,eACA,QAAA,aC6FA,UAAA,KACK,OAAA,KACG,QAAA,IEvLR,YAAA,WACA,iBAAA,KACA,OAAA,IAAA,MAAA,KN+lCD,cAAA,IGjgCC,mBAAoB,IAAI,IAAI,YAC5B,cAAA,IAAA,IAAA,YHmgCD,WAAA,IAAA,IAAA,YG5/BC,YACA,cAAA,IAEA,GH+/BD,WAAA,KGv/BC,cAAe,KACf,OAAA,EACA,WAAA,IAAA,MAAA,KAEA,SACA,SAAA,SACA,MAAA,IACA,OAAA,IACA,QAAA,EHy/BD,OAAA,KGj/BC,SAAA,OF0/BA,KAAM,cEx/BJ,OAAA,EAEA,0BACA,yBACA,SAAA,OACA,MAAA,KHm/BH,OAAA,KGx+BC,OAAQ,EACR,SAAA,QH0+BD,KAAA,KCSD,cACE,OAAQ,QAQV,IACA,IMlpCE,IACA,IACA,IACA,INwoCF,GACA,GACA,GACA,GACA,GACA,GDAC,YAAA,QOlpCC,YAAa,IN2pCb,YAAa,IACb,MAAO,QAoBT,WAZA,UAaA,WAZA,UM5pCI,WN6pCJ,UM5pCI,WN6pCJ,UM5pCI,WN6pCJ,UDMC,WCLD,UACA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SAaA,UAZA,SMppCE,YAAa,INwqCb,YAAa,EACb,MAAO,KAGT,IMxqCE,IAJF,IN2qCA,GAEA,GDLC,GCS
 C,WAAY,KACZ,cAAe,KASjB,WANA,UDCC,WCCD,UM5qCA,WN8qCA,UACA,UANA,SM5qCI,UN8qCJ,SM3qCA,UN6qCA,SAQE,UAAW,IAGb,IMprCE,IAJF,INurCA,GAEA,GDLC,GCSC,WAAY,KACZ,cAAe,KASjB,WANA,UDCC,WCCD,UMvrCA,WNyrCA,UACA,UANA,SMxrCI,UN0rCJ,SMtrCA,UNwrCA,SMxrCU,UAAA,IACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KACV,IAAA,GAAU,UAAA,KAOR,IADF,GPssCC,UAAA,KCSD,EMzsCE,OAAA,EAAA,EAAA,KAEA,MPosCD,cAAA,KO/rCC,UAAW,KAwOX,YAAa,IA1OX,YAAA,IPssCH,yBO7rCC,MNssCE,UAAW,MMjsCf,OAAA,MAEE,UAAA,IAKF,MP0rCC,KO1rCsB,QAAA,KP6rCtB,iBAAA,QO5rCsB,WP+rCtB,WAAA,KO9rCsB,YPisCtB,WAAA,MOhsCsB,aPmsCtB,WAAA,OOlsCsB,cPqsCtB,WAAA,QOlsCsB,aPqsCtB,YAAA,OOpsCsB,gBPusCtB,eAAA,UOtsCsB,gBPysCtB,eAAA,UOrsCC,iBPwsCD,eAAA,WQ3yCC,YR8yCD,MAAA,KCSD,cOpzCI,MAAA,QAHF,qBDwGF,qBP6sCC,MAAA,QCSD,cO3zCI,MAAA,QAHF,qBD2GF,qBPitCC,MAAA,QCSD,WOl0CI,MAAA,QAHF,kBD8GF,kBPqtCC,MAAA,QCSD,cOz0CI,MAAA,QAHF,qBDiHF,qBPytCC,MAAA,QCSD,aOh1CI,MAAA,QDwHF,oBAHF,oBExHE,MAAA,QACA,YR01CA,MAAO,KQx1CL,iBAAA,QAHF,mBF8HF,mBP2tCC,iBAAA,QCSD,YQ/1CI,iBAAA
 ,QAHF,mBFiIF,mBP+tCC,iBAAA,QCSD,SQt2CI,iBAAA,QAHF,gBFoIF,gBPmuCC,iBAAA,QCSD,YQ72CI,iBAAA,QAHF,mBFuIF,mBPuuCC,iBAAA,QCSD,WQp3CI,iBAAA,QF6IF,kBADF,kBAEE,iBAAA,QPsuCD,aO7tCC,eAAgB,INsuChB,OAAQ,KAAK,EAAE,KMpuCf,cAAA,IAAA,MAAA,KAFF,GPkuCC,GCSC,WAAY,EACZ,cAAe,KM9tCf,MP0tCD,MO3tCD,MAPI,MASF,cAAA,EAIF,eALE,aAAA,EACA,WAAA,KPkuCD,aO9tCC,aAAc,EAKZ,YAAA,KACA,WAAA,KP6tCH,gBOvtCC,QAAS,aACT,cAAA,IACA,aAAA,IAEF,GNguCE,WAAY,EM9tCZ,cAAA,KAGA,GADF,GP0tCC,YAAA,WOttCC,GPytCD,YAAA,IOnnCD,GAvFM,YAAA,EAEA,yBACA,kBGtNJ,MAAA,KACA,MAAA,MACA,SAAA,OVq6CC,MAAA,KO7nCC,WAAY,MAhFV,cAAA,SPgtCH,YAAA,OOtsCD,kBNgtCE,YAAa,OM1sCjB,0BPssCC,YOrsCC,OAAA,KA9IqB,cAAA,IAAA,OAAA,KAmJvB,YACE,UAAA,IACA,eAAA,UAEA,WPssCD,QAAA,KAAA,KOjsCG,OAAA,EAAA,EAAA,KN0sCF,UAAW,OACX,YAAa,IAAI,MAAM,KMptCzB,yBP+sCC,wBO/sCD,yBNytCE,cAAe,EMnsCb,kBAFA,kBACA,iBPksCH,QAAA,MO/rCG,UAAA,INwsCF,YAAa,WACb,MAAO,KMhsCT,yBP2rCC,yBO3rCD,wBAEE,QAAA,cAEA,oBACA,sBACA,cAAA,KP6rCD,aAAA,EOvrCG,WAAA,MNgsCF,aAAc,IAAI,MAAM,KACxB,YAAa,EMhsCX,kCNksCJ,kCMnsCe,iCACX,oCNmsCJ
 ,oCDLC,mCCUC,QAAS,GMjsCX,iCNmsCA,iCMzsCM,gCAOJ,mCNmsCF,mCDLC,kCO7rCC,QAAA,cPksCD,QWv+CC,cAAe,KVg/Cf,WAAY,OACZ,YAAa,WU7+Cb,KXy+CD,IWr+CD,IACE,KACA,YAAA,MAAA,OAAA,SAAA,cAAA,UAEA,KACA,QAAA,IAAA,IXu+CD,UAAA,IWn+CC,MAAO,QACP,iBAAA,QACA,cAAA,IAEA,IACA,QAAA,IAAA,IACA,UAAA,IV4+CA,MU5+CA,KXq+CD,iBAAA,KW3+CC,cAAe,IASb,mBAAA,MAAA,EAAA,KAAA,EAAA,gBACA,WAAA,MAAA,EAAA,KAAA,EAAA,gBAEA,QV6+CF,QU7+CE,EXq+CH,UAAA,KWh+CC,YAAa,IACb,mBAAA,KACA,WAAA,KAEA,IACA,QAAA,MACA,QAAA,MACA,OAAA,EAAA,EAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KACA,WAAA,UXk+CD,UAAA,WW7+CC,iBAAkB,QAehB,OAAA,IAAA,MAAA,KACA,cAAA,IAEA,SACA,QAAA,EACA,UAAA,QXi+CH,MAAA,QW59CC,YAAa,SACb,iBAAA,YACA,cAAA,EC1DF,gBCHE,WAAA,MACA,WAAA,OAEA,Wb8hDD,cAAA,KYxhDC,aAAA,KAqEA,aAAc,KAvEZ,YAAA,KZ+hDH,yBY1hDC,WAkEE,MAAO,OZ69CV,yBY5hDC,WA+DE,MAAO,OZk+CV,0BYzhDC,WCvBA,MAAA,QAGA,iBbmjDD,cAAA,KYthDC,aAAc,KCvBd,aAAA,KACA,YAAA,KCAE,KACE,aAAA,MAEA,YAAA,MAGA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,U
 AAA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UdgjDL,SAAA,SchiDG,WAAA,IACE,cAAA,KdkiDL,aAAA,Kc1hDG,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Ud6hDH,MAAA,Kc7hDG,WdgiDH,MAAA,KchiDG,WdmiDH,MAAA,acniDG,WdsiDH,MAAA,actiDG,UdyiDH,MAAA,IcziDG,Ud4iDH,MAAA,ac5iDG,Ud+iDH,MAAA,ac/iDG,UdkjDH,MAAA,IcljDG,UdqjDH,MAAA,acrjDG,UdwjDH,MAAA,acxjDG,Ud2jDH,MAAA,Ic3jDG,Ud8jDH,MAAA,ac/iDG,UdkjDH,MAAA,YcljDG,gBdqjDH,MAAA,KcrjDG,gBdwjDH,MAAA,acxjDG,gBd2jDH,MAAA,ac3jDG,ed8jDH,MAAA,Ic9jDG,edikDH,MAAA,acjkDG,edokDH,MAAA,acpkDG,edukDH,MAAA,IcvkDG,ed0kDH,MAAA,ac1kDG,ed6kDH,MAAA,ac7kDG,edglDH,MAAA,IchlDG,edmlDH,MAAA,ac9kDG,edilDH,MAAA,YchmDG,edmmDH,MAAA,KcnmDG,gBdsmDH,KAAA,KctmDG,gBdymDH,KAAA,aczmDG,gBd4mDH,KAAA,ac5mDG,ed+mDH,KAAA,Ic/mDG,edknDH,KAAA,aclnDG,edqnDH,KAAA,acrnDG,edwnDH,KAAA,IcxnDG,ed2nDH,KAAA,ac3nDG,ed8nDH,KAAA,ac9nDG,edioDH,KAAA,IcjoDG,edooDH,KAAA,ac/nDG,edkoDH,KAAA,YcnnDG,edsnDH,KAAA,KctnDG,kBdynDH,YAAA,KcznDG,kBd
 4nDH,YAAA,ac5nDG,kBd+nDH,YAAA,ac/nDG,iBdkoDH,YAAA,IcloDG,iBdqoDH,YAAA,acroDG,iBdwoDH,YAAA,acxoDG,iBd2oDH,YAAA,Ic3oDG,iBd8oDH,YAAA,ac9oDG,iBdipDH,YAAA,acjpDG,iBdopDH,YAAA,IcppDG,iBdupDH,YAAA,acvpDG,iBd0pDH,YAAA,Yc5rDG,iBACE,YAAA,EAOJ,yBACE,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Ud0rDD,MAAA,Kc1rDC,Wd6rDD,MAAA,Kc7rDC,WdgsDD,MAAA,achsDC,WdmsDD,MAAA,acnsDC,UdssDD,MAAA,IctsDC,UdysDD,MAAA,aczsDC,Ud4sDD,MAAA,ac5sDC,Ud+sDD,MAAA,Ic/sDC,UdktDD,MAAA,acltDC,UdqtDD,MAAA,acrtDC,UdwtDD,MAAA,IcxtDC,Ud2tDD,MAAA,ac5sDC,Ud+sDD,MAAA,Yc/sDC,gBdktDD,MAAA,KcltDC,gBdqtDD,MAAA,acrtDC,gBdwtDD,MAAA,acxtDC,ed2tDD,MAAA,Ic3tDC,ed8tDD,MAAA,ac9tDC,ediuDD,MAAA,acjuDC,edouDD,MAAA,IcpuDC,eduuDD,MAAA,acvuDC,ed0uDD,MAAA,ac1uDC,ed6uDD,MAAA,Ic7uDC,edgvDD,MAAA,ac3uDC,ed8uDD,MAAA,Yc7vDC,edgwDD,MAAA,KchwDC,gBdmwDD,KAAA,KcnwDC,gBdswDD,KAAA,actwDC,gBdywDD,KAAA,aczwDC,ed4wDD,KAAA,Ic5wDC,ed+wDD,KAAA,ac/wDC,edkxDD,KAAA,aclxDC,edqxDD,KAAA,IcrxDC,edwxDD,KAAA,acxxDC,ed2xDD,KAAA,ac3xDC,ed8xDD,KAAA,Ic9xDC,ediyDD,KAAA,a
 c5xDC,ed+xDD,KAAA,YchxDC,edmxDD,KAAA,KcnxDC,kBdsxDD,YAAA,KctxDC,kBdyxDD,YAAA,aczxDC,kBd4xDD,YAAA,ac5xDC,iBd+xDD,YAAA,Ic/xDC,iBdkyDD,YAAA,aclyDC,iBdqyDD,YAAA,acryDC,iBdwyDD,YAAA,IcxyDC,iBd2yDD,YAAA,ac3yDC,iBd8yDD,YAAA,ac9yDC,iBdizDD,YAAA,IcjzDC,iBdozDD,YAAA,acpzDC,iBduzDD,YAAA,YY9yDD,iBE3CE,YAAA,GAQF,yBACE,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Udw1DD,MAAA,Kcx1DC,Wd21DD,MAAA,Kc31DC,Wd81DD,MAAA,ac91DC,Wdi2DD,MAAA,acj2DC,Udo2DD,MAAA,Icp2DC,Udu2DD,MAAA,acv2DC,Ud02DD,MAAA,ac12DC,Ud62DD,MAAA,Ic72DC,Udg3DD,MAAA,ach3DC,Udm3DD,MAAA,acn3DC,Uds3DD,MAAA,Ict3DC,Udy3DD,MAAA,ac12DC,Ud62DD,MAAA,Yc72DC,gBdg3DD,MAAA,Kch3DC,gBdm3DD,MAAA,acn3DC,gBds3DD,MAAA,act3DC,edy3DD,MAAA,Icz3DC,ed43DD,MAAA,ac53DC,ed+3DD,MAAA,ac/3DC,edk4DD,MAAA,Icl4DC,edq4DD,MAAA,acr4DC,edw4DD,MAAA,acx4DC,ed24DD,MAAA,Ic34DC,ed84DD,MAAA,acz4DC,ed44DD,MAAA,Yc35DC,ed85DD,MAAA,Kc95DC,gBdi6DD,KAAA,Kcj6DC,gBdo6DD,KAAA,acp6DC,gBdu6DD,KAAA,acv6DC,ed06DD,KAAA,Ic16DC,ed66DD,KAAA,ac76DC,edg7DD,KAAA,ach7DC,edm7DD,KAAA,Icn7DC,ed
 s7DD,KAAA,act7DC,edy7DD,KAAA,acz7DC,ed47DD,KAAA,Ic57DC,ed+7DD,KAAA,ac17DC,ed67DD,KAAA,Yc96DC,edi7DD,KAAA,Kcj7DC,kBdo7DD,YAAA,Kcp7DC,kBdu7DD,YAAA,acv7DC,kBd07DD,YAAA,ac17DC,iBd67DD,YAAA,Ic77DC,iBdg8DD,YAAA,ach8DC,iBdm8DD,YAAA,acn8DC,iBds8DD,YAAA,Ict8DC,iBdy8DD,YAAA,acz8DC,iBd48DD,YAAA,ac58DC,iBd+8DD,YAAA,Ic/8DC,iBdk9DD,YAAA,acl9DC,iBdq9DD,YAAA,YYz8DD,iBE9CE,YAAA,GAQF,0BACE,UAAA,WAAA,WAAA,WAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,UAAA,Uds/DD,MAAA,Kct/DC,Wdy/DD,MAAA,Kcz/DC,Wd4/DD,MAAA,ac5/DC,Wd+/DD,MAAA,ac//DC,UdkgED,MAAA,IclgEC,UdqgED,MAAA,acrgEC,UdwgED,MAAA,acxgEC,Ud2gED,MAAA,Ic3gEC,Ud8gED,MAAA,ac9gEC,UdihED,MAAA,acjhEC,UdohED,MAAA,IcphEC,UduhED,MAAA,acxgEC,Ud2gED,MAAA,Yc3gEC,gBd8gED,MAAA,Kc9gEC,gBdihED,MAAA,acjhEC,gBdohED,MAAA,acphEC,eduhED,MAAA,IcvhEC,ed0hED,MAAA,ac1hEC,ed6hED,MAAA,ac7hEC,edgiED,MAAA,IchiEC,edmiED,MAAA,acniEC,edsiED,MAAA,actiEC,edyiED,MAAA,IcziEC,ed4iED,MAAA,acviEC,ed0iED,MAAA,YczjEC,ed4jED,MAAA,Kc5jEC,gBd+jED,KAAA,Kc/jEC,gBdkkED,KAAA,aclkEC,gBdqkED,KAAA,acrkEC,edwkED,KAA
 A,IcxkEC,ed2kED,KAAA,ac3kEC,ed8kED,KAAA,ac9kEC,edilED,KAAA,IcjlEC,edolED,KAAA,acplEC,edulED,KAAA,acvlEC,ed0lED,KAAA,Ic1lEC,ed6lED,KAAA,acxlEC,ed2lED,KAAA,Yc5kEC,ed+kED,KAAA,Kc/kEC,kBdklED,YAAA,KcllEC,kBdqlED,YAAA,acrlEC,kBdwlED,YAAA,acxlEC,iBd2lED,YAAA,Ic3lEC,iBd8lED,YAAA,ac9lEC,iBdimED,YAAA,acjmEC,iBdomED,YAAA,IcpmEC,iBdumED,YAAA,acvmEC,iBd0mED,YAAA,ac1mEC,iBd6mED,YAAA,Ic7mEC,iBdgnED,YAAA,achnEC,iBdmnED,YAAA,YetrED,iBACA,YAAA,GAGA,MACA,iBAAA,YAEA,QfyrED,YAAA,IevrEC,eAAgB,IAChB,MAAA,KfyrED,WAAA,KelrEC,GACA,WAAA,KfsrED,OexrEC,MAAO,KdmsEP,UAAW,KACX,cAAe,KcvrET,mBd0rER,mBczrEQ,mBAHA,mBACA,mBd0rER,mBDHC,QAAA,IensEC,YAAa,WAoBX,eAAA,IACA,WAAA,IAAA,MAAA,KArBJ,mBdktEE,eAAgB,OAChB,cAAe,IAAI,MAAM,KDJ1B,uCCMD,uCcrtEA,wCdstEA,wCclrEI,2CANI,2CforEP,WAAA,EezqEG,mBf4qEH,WAAA,IAAA,MAAA,KCWD,cACE,iBAAkB,Kc/pEpB,6BdkqEA,6BcjqEE,6BAZM,6BfsqEP,6BCMD,6BDHC,QAAA,ICWD,gBACE,OAAQ,IAAI,MAAM,Kc1qEpB,4Bd6qEA,4Bc7qEA,4BAQQ,4Bf8pEP,4BCMD,4Bc7pEM,OAAA,IAAA,MAAA,KAYF,4BAFJ,4BfopEC,oBAAA,IevoEG,yCf0oEH,iBAAA,QehoE
 C,4BACA,iBAAA,QfooED,uBe9nEG,SAAA,OdyoEF,QAAS,acxoEL,MAAA,KAEA,sBfioEL,sBgB7wEC,SAAA,OfwxEA,QAAS,WACT,MAAO,KAST,0BerxEE,0Bf+wEF,0BAGA,0BexxEM,0BAMJ,0BfgxEF,0BAGA,0BACA,0BDNC,0BCAD,0BAGA,0BASE,iBAAkB,QDLnB,sCgBlyEC,sCAAA,oCfyyEF,sCetxEM,sCf2xEJ,iBAAkB,QASpB,2Be1yEE,2BfoyEF,2BAGA,2Be7yEM,2BAMJ,2BfqyEF,2BAGA,2BACA,2BDNC,2BCAD,2BAGA,2BASE,iBAAkB,QDLnB,uCgBvzEC,uCAAA,qCf8zEF,uCe3yEM,uCfgzEJ,iBAAkB,QASpB,wBe/zEE,wBfyzEF,wBAGA,wBel0EM,wBAMJ,wBf0zEF,wBAGA,wBACA,wBDNC,wBCAD,wBAGA,wBASE,iBAAkB,QDLnB,oCgB50EC,oCAAA,kCfm1EF,oCeh0EM,oCfq0EJ,iBAAkB,QASpB,2Bep1EE,2Bf80EF,2BAGA,2Bev1EM,2BAMJ,2Bf+0EF,2BAGA,2BACA,2BDNC,2BCAD,2BAGA,2BASE,iBAAkB,QDLnB,uCgBj2EC,uCAAA,qCfw2EF,uCer1EM,uCf01EJ,iBAAkB,QASpB,0Bez2EE,0Bfm2EF,0BAGA,0Be52EM,0BAMJ,0Bfo2EF,0BAGA,0BACA,0BDNC,0BCAD,0BAGA,0BASE,iBAAkB,QDLnB,sCehtEC,sCADF,oCdwtEA,sCe12EM,sCDoJJ,iBAAA,QA6DF,kBACE,WAAY,KA3DV,WAAA,KAEA,oCACA,kBACA,MAAA,KfotED,cAAA,Ke7pEC,WAAY,OAnDV,mBAAA,yBfmtEH,OAAA,IAAA,MAAA,KCWD,yBACE,cAAe,Ec5qEjB,qCd+qEA,qCcjtEI,qCARM,qCfktET,qCCMD,
 qCDHC,YAAA,OCWD,kCACE,OAAQ,EcvrEV,0Dd0rEA,0Dc1rEA,0DAzBU,0Df4sET,0DCMD,0DAME,YAAa,Ec/rEf,yDdksEA,yDclsEA,yDArBU,yDfgtET,yDCMD,yDAME,aAAc,EDLjB,yDe1sEW,yDEzNV,yDjBk6EC,yDiBj6ED,cAAA,GAMA,SjBk6ED,UAAA,EiB/5EC,QAAS,EACT,OAAA,EACA,OAAA,EAEA,OACA,QAAA,MACA,MAAA,KACA,QAAA,EACA,cAAA,KACA,UAAA,KjBi6ED,YAAA,QiB95EC,MAAO,KACP,OAAA,EACA,cAAA,IAAA,MAAA,QAEA,MjBg6ED,QAAA,aiBr5EC,UAAW,Kb4BX,cAAA,IACG,YAAA,IJ63EJ,mBiBr5EC,mBAAoB,WhBg6EjB,gBAAiB,WgB95EpB,WAAA,WjBy5ED,qBiBv5EC,kBAGA,OAAQ,IAAI,EAAE,EACd,WAAA,MjBs5ED,YAAA,OiBj5EC,iBACA,QAAA,MAIF,kBhB25EE,QAAS,MgBz5ET,MAAA,KAIF,iBAAA,ahB05EE,OAAQ,KI99ER,uBY2EF,2BjB64EC,wBiB54EC,QAAA,IAAA,KAAA,yBACA,eAAA,KAEA,OACA,QAAA,MjB+4ED,YAAA,IiBr3EC,UAAW,KACX,YAAA,WACA,MAAA,KAEA,cACA,QAAA,MACA,MAAA,KACA,OAAA,KACA,QAAA,IAAA,KACA,UAAA,KACA,YAAA,WACA,MAAA,KbxDA,iBAAA,KACQ,iBAAA,KAyHR,OAAA,IAAA,MAAA,KACK,cAAA,IACG,mBAAA,MAAA,EAAA,IAAA,IAAA,iBJwzET,WAAA,MAAA,EAAA,IAAA,IAAA,iBkBh8EC,mBAAA,aAAA,YAAA,KAAA,mBAAA,YAAA,KACE,cAAA,aAAA,YAAA,KAAA,WAAA,YAAA,KACA,WAAA,aAAA,YAAA,
 KAAA,WAAA,YAAA,KdWM,oBJy7ET,aAAA,QIx5EC,QAAA,EACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,qBACA,WAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,qBAEF,gCAA0B,MAAA,KJ25E3B,QAAA,EI15EiC,oCJ65EjC,MAAA,KiBh4EG,yCACA,MAAA,KAQF,0BhBs4EA,iBAAkB,YAClB,OAAQ,EgBn4EN,wBjB63EH,wBiB13EC,iChBq4EA,iBAAkB,KgBn4EhB,QAAA,EAIF,wBACE,iCjB03EH,OAAA,YiB72EC,sBjBg3ED,OAAA,KiB91EG,mBhB02EF,mBAAoB,KAEtB,qDgB32EM,8BjBo2EH,8BiBj2EC,wCAAA,+BhB62EA,YAAa,KgB32EX,iCjBy2EH,iCiBt2EC,2CAAA,kChB02EF,0BACA,0BACA,oCACA,2BAKE,YAAa,KgBh3EX,iCjB82EH,iCACF,2CiBp2EC,kChBu2EA,0BACA,0BACA,oCACA,2BgBz2EA,YAAA,MhBi3EF,YgBv2EE,cAAA,KAGA,UADA,OjBi2ED,SAAA,SiBr2EC,QAAS,MhBg3ET,WAAY,KgBx2EV,cAAA,KAGA,gBADA,aAEA,WAAA,KjBi2EH,aAAA,KiB91EC,cAAe,EhBy2Ef,YAAa,IACb,OAAQ,QgBp2ER,+BjBg2ED,sCiBl2EC,yBACA,gCAIA,SAAU,ShBw2EV,WAAY,MgBt2EZ,YAAA,MAIF,oBAAA,cAEE,WAAA,KAGA,iBADA,cAEA,SAAA,SACA,QAAA,aACA,aAAA,KjB61ED,cAAA,EiB31EC,YAAa,IhBs2Eb,eAAgB,OgBp2EhB,OAAA,QAUA,kCjBo1ED,4BCWC,WAAY,EACZ,YAAa,KgBv1Eb,wCAAA,qCjBm1ED,8BCOD,+BgBh2EI,2BhB+1EJ,4
 BAME,OAAQ,YDNT,0BiBv1EG,uBAMF,oCAAA,iChB61EA,OAAQ,YDNT,yBiBp1EK,sBAaJ,mCAFF,gCAGE,OAAA,YAGA,qBjBy0ED,WAAA,KiBv0EC,YAAA,IhBk1EA,eAAgB,IgBh1Ed,cAAA,EjB00EH,8BiB5zED,8BCnQE,cAAA,EACA,aAAA,EAEA,UACA,OAAA,KlBkkFD,QAAA,IAAA,KkBhkFC,UAAA,KACE,YAAA,IACA,cAAA,IAGF,gBjB0kFA,OAAQ,KiBxkFN,YAAA,KD2PA,0BAFJ,kBAGI,OAAA,KAEA,6BACA,OAAA,KjBy0EH,QAAA,IAAA,KiB/0EC,UAAW,KAST,YAAA,IACA,cAAA,IAVJ,mChB81EE,OAAQ,KgBh1EN,YAAA,KAGA,6CAjBJ,qCAkBI,OAAA,KAEA,oCACA,OAAA,KjBy0EH,WAAA,KiBr0EC,QAAS,IAAI,KC/Rb,UAAA,KACA,YAAA,IAEA,UACA,OAAA,KlBumFD,QAAA,KAAA,KkBrmFC,UAAA,KACE,YAAA,UACA,cAAA,IAGF,gBjB+mFA,OAAQ,KiB7mFN,YAAA,KDuRA,0BAFJ,kBAGI,OAAA,KAEA,6BACA,OAAA,KjBk1EH,QAAA,KAAA,KiBx1EC,UAAW,KAST,YAAA,UACA,cAAA,IAVJ,mChBu2EE,OAAQ,KgBz1EN,YAAA,KAGA,6CAjBJ,qCAkBI,OAAA,KAEA,oCACA,OAAA,KjBk1EH,WAAA,KiBz0EC,QAAS,KAAK,KAEd,UAAA,KjB00ED,YAAA,UiBt0EG,cjBy0EH,SAAA,SiBp0EC,4BACA,cAAA,OAEA,uBACA,SAAA,SACA,IAAA,EACA,MAAA,EACA,QAAA,EACA,QAAA,MACA,MAAA,KjBu0ED,OAAA,KiBr0EC,YAAa,KhBg1Eb,WAAY,OACZ,eAAgB,KDLjB,oDiBv0EC,uCADA,iCAGA,MAA
 O,KhBg1EP,OAAQ,KACR,YAAa,KDLd,oDiBv0EC,uCADA,iCAKA,MAAO,KhB80EP,OAAQ,KACR,YAAa,KAKf,uBAEA,8BAJA,4BADA,yBAEA,oBAEA,2BDNC,4BkBruFG,mCAJA,yBD0ZJ,gCbvWE,MAAA,QJ2rFD,2BkBxuFG,aAAA,QACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBd4CJ,WAAA,MAAA,EAAA,IAAA,IAAA,iBJgsFD,iCiBz1EC,aAAc,QC5YZ,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QACA,WAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QlByuFH,gCiB91EC,MAAO,QCtYL,iBAAA,QlBuuFH,aAAA,QCWD,oCACE,MAAO,QAKT,uBAEA,8BAJA,4BADA,yBAEA,oBAEA,2BDNC,4BkBnwFG,mCAJA,yBD6ZJ,gCb1WE,MAAA,QJytFD,2BkBtwFG,aAAA,QACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBd4CJ,WAAA,MAAA,EAAA,IAAA,IAAA,iBJ8tFD,iCiBp3EC,aAAc,QC/YZ,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QACA,WAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAAA,EAAA,IAAA,QlBuwFH,gCiBz3EC,MAAO,QCzYL,iBAAA,QlBqwFH,aAAA,QCWD,oCACE,MAAO,QAKT,qBAEA,4BAJA,0BADA,uBAEA,kBAEA,yBDNC,0BkBjyFG,iCAJA,uBDgaJ,8Bb7WE,MAAA,QJuvFD,yBkBpyFG,aAAA,QACE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBd4CJ,WAAA,MAAA,EAAA,IAAA,IAAA,iBJ4vFD,+BiB/4EC,aAAc,QClZZ,mBAAA,MAAA,EAAA,IAAA,IAAA,iBAAA,EAA
[minified bootstrap.min.css.map payload elided: source-map VLQ "mappings" data followed by a "sourcesContent" array embedding the normalize.css v3.0.3 and Bootstrap v3.3.7 stylesheet sources]
 ,\n.h6 .small {\n  font-weight: normal;\n  line-height: 1;\n  color: #777777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n  margin-top: 20px;\n  margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n  font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n  font-size: 75%;\n}\nh1,\n.h1 {\n  font-size: 36px;\n}\nh2,\n.h2 {\n  font-size: 30px;\n}\nh3,\n.h3 {\n  font-size: 24px;\n}\nh4,\n.h4 {\n  font-size: 18px;\n}\nh5,\n.h5 {\n  font-size: 14px;\n}\nh6,\n.h6 {\n  font-size: 12px;\n}\np {\n  margin: 0 0 10px;\n}\n.lead {\n  margin-bottom: 20px;\n  font-size: 16px;\n  font-weight: 300;\n  line-height: 1.4;\n}\n@media (min-width: 768px) {\n  .lead {\n    font-size: 21px;\n  }\n}\nsmall,\n.s
 mall {\n  font-size: 85%;\n}\nmark,\n.mark {\n  background-color: #fcf8e3;\n  padding: .2em;\n}\n.text-left {\n  text-align: left;\n}\n.text-right {\n  text-align: right;\n}\n.text-center {\n  text-align: center;\n}\n.text-justify {\n  text-align: justify;\n}\n.text-nowrap {\n  white-space: nowrap;\n}\n.text-lowercase {\n  text-transform: lowercase;\n}\n.text-uppercase {\n  text-transform: uppercase;\n}\n.text-capitalize {\n  text-transform: capitalize;\n}\n.text-muted {\n  color: #777777;\n}\n.text-primary {\n  color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n  color: #286090;\n}\n.text-success {\n  color: #3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n  color: #2b542c;\n}\n.text-info {\n  color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n  color: #245269;\n}\n.text-warning {\n  color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n  color: #66512c;\n}\n.text-danger {\n  color: #a94442;\n}\na.text-danger:hover,\na.text-danger:f
 ocus {\n  color: #843534;\n}\n.bg-primary {\n  color: #fff;\n  background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n  background-color: #286090;\n}\n.bg-success {\n  background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n 

<TRUNCATED>

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/50] [abbrv] hadoop git commit: YARN-8080. Add restart policy for YARN services. Contributed by Suma Shivaprasad

Posted by ar...@apache.org.
YARN-8080.  Add restart policy for YARN services.
            Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f083ed8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f083ed8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f083ed8

Branch: refs/heads/HDDS-48
Commit: 7f083ed8699a720d3fb82e4ec310356902a6ac30
Parents: 7802af6
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 17 17:16:50 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 17 17:16:50 2018 -0400

----------------------------------------------------------------------
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   8 +
 .../hadoop/yarn/service/ClientAMService.java    |   7 +-
 .../hadoop/yarn/service/ServiceContext.java     |   4 +
 .../hadoop/yarn/service/ServiceManager.java     |  25 +-
 .../hadoop/yarn/service/ServiceScheduler.java   |  73 ++-
 .../yarn/service/api/records/Component.java     |  71 +++
 .../service/component/AlwaysRestartPolicy.java  |  82 ++++
 .../yarn/service/component/Component.java       | 209 ++++++--
 .../component/ComponentRestartPolicy.java       |  45 ++
 .../service/component/NeverRestartPolicy.java   |  82 ++++
 .../component/OnFailureRestartPolicy.java       |  87 ++++
 .../component/instance/ComponentInstance.java   |  91 +++-
 .../hadoop/yarn/service/utils/ServiceUtils.java |  18 +
 .../hadoop/yarn/service/ServiceTestUtils.java   |  46 +-
 .../hadoop/yarn/service/TestServiceManager.java |   6 +-
 .../yarn/service/component/TestComponent.java   |  99 +++-
 .../component/TestComponentRestartPolicy.java   | 130 +++++
 .../instance/TestComponentInstance.java         | 484 ++++++++++++++++++-
 .../markdown/yarn-service/YarnServiceAPI.md     |   2 +
 19 files changed, 1447 insertions(+), 122 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index cea8296..d90ae06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -424,6 +424,14 @@ definitions:
         items:
           type: string
         description: A list of quicklink keys defined at the service level, and to be resolved by this component.
+      restartPolicy:
+        type: string
+        description: Restart policy of the component. ALWAYS - always restart the component instance, even if the instance exit code = 0; ON_FAILURE - only restart the component instance if the instance exit code != 0; NEVER - never restart, in any case.
+        enum:
+          - ALWAYS
+          - ON_FAILURE
+          - NEVER
+        default: ALWAYS
   ReadinessCheck:
     description: A check to be performed to determine the readiness of a component instance (a container). If no readiness check is specified, the default readiness check will be used unless the yarn.service.default-readiness-check.enabled configuration property is set to false at the component, service, or system level. The artifact field is currently unsupported but may be implemented in the future, enabling a pluggable helper container to support advanced use cases.
     required:
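
For reference, the restart_policy field maps onto the Java records changed later in this commit. A minimal sketch of setting it through that API, assuming the record's usual fluent setters (the component name "worker" is illustrative):

  import org.apache.hadoop.yarn.service.api.records.Component;

  Component comp = new Component()
      .name("worker")  // hypothetical component name
      .restartPolicy(Component.RestartPolicyEnum.ON_FAILURE);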

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
index d5d6fa4..e97c3d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto;
@@ -130,7 +131,7 @@ public class ClientAMService extends AbstractService
     LOG.info("Stop the service by {}", UserGroupInformation.getCurrentUser());
     context.scheduler.getDiagnostics()
         .append("Stopped by user " + UserGroupInformation.getCurrentUser());
-    context.scheduler.setGracefulStop();
+    context.scheduler.setGracefulStop(FinalApplicationStatus.ENDED);
 
     // Stop the service in 2 seconds delay to make sure this rpc call is completed.
     // shutdown hook will be executed which will stop AM gracefully.
@@ -157,10 +158,10 @@ public class ClientAMService extends AbstractService
   public UpgradeServiceResponseProto upgrade(
       UpgradeServiceRequestProto request) throws IOException {
     try {
-      context.getServiceManager().processUpgradeRequest(request.getVersion(),
-          request.getAutoFinalize());
       LOG.info("Upgrading service to version {} by {}", request.getVersion(),
           UserGroupInformation.getCurrentUser());
+      context.getServiceManager().processUpgradeRequest(request.getVersion(),
+          request.getAutoFinalize());
       return UpgradeServiceResponseProto.newBuilder().build();
     } catch (Exception ex) {
       return UpgradeServiceResponseProto.newBuilder().setError(ex.getMessage())
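
With this change the AM records why it stopped: a client-initiated stop maps to FinalApplicationStatus.ENDED, while the scheduler's own termination path (added to ServiceScheduler below) reports SUCCEEDED or FAILED. A hedged summary, with scheduler standing in for the ServiceScheduler instance:

  // Client asked the service to stop:
  scheduler.setGracefulStop(FinalApplicationStatus.ENDED);
  // All terminating components succeeded (scheduler-internal path):
  scheduler.setGracefulStop(FinalApplicationStatus.SUCCEEDED);
  // At least one component accumulated failed instances:
  scheduler.setGracefulStop(FinalApplicationStatus.FAILED);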

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java
index 6c91b9c..8779153 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java
@@ -56,4 +56,8 @@ public class ServiceContext {
   void setServiceManager(ServiceManager serviceManager) {
     this.serviceManager = Preconditions.checkNotNull(serviceManager);
   }
+
+  public Service getService() {
+    return service;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
index e6a38dc..05ecb3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.component.Component;
 import org.apache.hadoop.yarn.service.component.ComponentEvent;
 import org.apache.hadoop.yarn.service.component.ComponentEventType;
+import org.apache.hadoop.yarn.service.component.ComponentRestartPolicy;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
@@ -266,12 +267,24 @@ public class ServiceManager implements EventHandler<ServiceEvent> {
         event.setAutoFinalize(true);
       }
       compsThatNeedUpgrade.forEach(component -> {
-        ComponentEvent needUpgradeEvent = new ComponentEvent(
-            component.getName(), ComponentEventType.UPGRADE)
-            .setTargetSpec(component)
-            .setUpgradeVersion(event.getVersion());
-        context.scheduler.getDispatcher().getEventHandler().handle(
-            needUpgradeEvent);
+        org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum
+            restartPolicy = component.getRestartPolicy();
+
+        final ComponentRestartPolicy restartPolicyHandler =
+            Component.getRestartPolicyHandler(restartPolicy);
+        // Do not allow upgrades for components which have NEVER/ON_FAILURE
+        // restart policy
+        if (restartPolicyHandler.allowUpgrades()) {
+          ComponentEvent needUpgradeEvent = new ComponentEvent(
+              component.getName(), ComponentEventType.UPGRADE).setTargetSpec(
+              component).setUpgradeVersion(event.getVersion());
+          context.scheduler.getDispatcher().getEventHandler().handle(
+              needUpgradeEvent);
+        } else {
+          LOG.info("The component {} has a restart "
+              + "policy that doesnt allow upgrades {} ", component.getName(),
+              component.getRestartPolicy().toString());
+        }
       });
     } else {
       // nothing to upgrade if upgrade auto finalize is requested, trigger a
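
The gate above resolves the component's declared policy to a handler and asks it whether upgrades are permitted. A minimal sketch of the resulting mapping, read from the allowUpgrades() implementations added later in this commit:

  import org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum;
  import org.apache.hadoop.yarn.service.component.Component;

  Component.getRestartPolicyHandler(RestartPolicyEnum.ALWAYS).allowUpgrades();      // true
  Component.getRestartPolicyHandler(RestartPolicyEnum.ON_FAILURE).allowUpgrades();  // false
  Component.getRestartPolicyHandler(RestartPolicyEnum.NEVER).allowUpgrades();       // false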

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index ee0a1a7..d3e8e4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+import org.apache.hadoop.yarn.service.component.ComponentRestartPolicy;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType;
@@ -77,6 +78,7 @@ import org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink;
 import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.apache.hadoop.yarn.util.BoundedAppender;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
@@ -89,8 +91,10 @@ import java.nio.ByteBuffer;
 import java.text.MessageFormat;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
@@ -101,6 +105,10 @@ import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
 import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.KILLED_AFTER_APP_COMPLETION;
 import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
 import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
+import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes
+    .EXIT_FALSE;
+import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes
+    .EXIT_SUCCESS;
 
 /**
  *
@@ -158,8 +166,15 @@ public class ServiceScheduler extends CompositeService {
 
   private boolean gracefulStop = false;
 
+  private volatile FinalApplicationStatus finalApplicationStatus =
+      FinalApplicationStatus.ENDED;
+
+  // For unit test override since we don't want to terminate UT process.
+  private ServiceUtils.ProcessTerminationHandler
+      terminationHandler = new ServiceUtils.ProcessTerminationHandler();
+
   public ServiceScheduler(ServiceContext context) {
-    super(context.service.getName());
+    super(context.getService().getName());
     this.context = context;
   }
 
@@ -256,8 +271,9 @@ public class ServiceScheduler extends CompositeService {
         .createAMRMClientAsync(1000, new AMRMClientCallback());
   }
 
-  protected void setGracefulStop() {
+  public void setGracefulStop(FinalApplicationStatus applicationStatus) {
     this.gracefulStop = true;
+    this.finalApplicationStatus = applicationStatus;
     nmClient.getClient().cleanupRunningContainersOnStop(true);
   }
 
@@ -877,4 +893,57 @@ public class ServiceScheduler extends CompositeService {
   public boolean hasAtLeastOnePlacementConstraint() {
     return hasAtLeastOnePlacementConstraint;
   }
+
+  /*
+   * Check whether all components of the scheduler have finished.
+   * If all components have finished (i.e.
+   * #failed-instances + #succeeded-instances == #total-n-containers),
+   * the service will be terminated.
+   */
+  public synchronized void terminateServiceIfAllComponentsFinished() {
+    boolean shouldTerminate = true;
+
+    // Succeeded comps and failed comps, for logging purposes.
+    Set<String> succeededComponents = new HashSet<>();
+    Set<String> failedComponents = new HashSet<>();
+
+    for (Component comp : getAllComponents().values()) {
+      ComponentRestartPolicy restartPolicy = comp.getRestartPolicyHandler();
+      if (!restartPolicy.shouldTerminate(comp)) {
+        shouldTerminate = false;
+        break;
+      }
+
+      long nFailed = comp.getNumFailedInstances();
+
+      if (nFailed > 0) {
+        failedComponents.add(comp.getName());
+      } else {
+        succeededComponents.add(comp.getName());
+      }
+    }
+
+    if (shouldTerminate) {
+      LOG.info("All component finished, exiting Service Master... "
+          + ", final status=" + (failedComponents.isEmpty() ?
+          "Succeeded" :
+          "Failed"));
+      LOG.info("Succeeded components: [" + org.apache.commons.lang3.StringUtils
+          .join(succeededComponents, ",") + "]");
+      LOG.info("Failed components: [" + org.apache.commons.lang3.StringUtils
+          .join(failedComponents, ",") + "]");
+
+      if (failedComponents.isEmpty()) {
+        setGracefulStop(FinalApplicationStatus.SUCCEEDED);
+        getTerminationHandler().terminate(EXIT_SUCCESS);
+      } else {
+        setGracefulStop(FinalApplicationStatus.FAILED);
+        getTerminationHandler().terminate(EXIT_FALSE);
+      }
+    }
+  }
+
+  public ServiceUtils.ProcessTerminationHandler getTerminationHandler() {
+    return terminationHandler;
+  }
 }
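
Summarizing terminateServiceIfAllComponentsFinished(): termination requires every component's policy to agree, and the final status follows from whether any component accumulated failed instances. A sketch, with components standing in for getAllComponents().values():

  boolean allFinished = components.stream()
      .allMatch(c -> c.getRestartPolicyHandler().shouldTerminate(c));
  // allFinished && no failed instances   -> SUCCEEDED, exit EXIT_SUCCESS
  // allFinished && any failed instances  -> FAILED,    exit EXIT_FALSE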

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
index 7deb076..0481123 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.yarn.service.api.records;
 
+import com.fasterxml.jackson.annotation.JsonValue;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 
@@ -29,7 +30,9 @@ import java.util.Objects;
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlEnum;
 import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -98,6 +101,74 @@ public class Component implements Serializable {
   private List<Container> containers =
       Collections.synchronizedList(new ArrayList<Container>());
 
+
+  @JsonProperty("restart_policy")
+  @XmlElement(name = "restart_policy")
+  private RestartPolicyEnum restartPolicy = RestartPolicyEnum.ALWAYS;
+
+  /**
+   * Restart policy of the component. ALWAYS: long lived components
+   * (always restart the component instance, even if the instance exit
+   * code &#x3D; 0);
+   * ON_FAILURE: only restart the component instance if the instance
+   * exit code !&#x3D; 0;
+   * NEVER: do not restart in any case.
+   *
+   * @return restartPolicy
+   **/
+  @XmlType(name = "restart_policy")
+  @XmlEnum
+  public enum RestartPolicyEnum {
+    ALWAYS("ALWAYS"),
+
+    ON_FAILURE("ON_FAILURE"),
+
+    NEVER("NEVER");
+    private String value;
+
+    RestartPolicyEnum(String value) {
+      this.value = value;
+    }
+
+    @Override
+    @JsonValue
+    public String toString() {
+      return value;
+    }
+  }
+
+  public Component restartPolicy(RestartPolicyEnum restartPolicyEnumVal) {
+    this.restartPolicy = restartPolicyEnumVal;
+    return this;
+  }
+
+  /**
+   * Restart policy of the component.
+   *
+   * ALWAYS: always restart the component instance, even if the
+   * instance exit code &#x3D; 0;
+   *
+   * ON_FAILURE: only restart the component instance if the instance
+   * exit code !&#x3D; 0;
+   *
+   * NEVER: do not restart in any case.
+   *
+   * @return restartPolicy
+   **/
+  @ApiModelProperty(value = "Policy of restart component. Including ALWAYS "
+      + "(Always restart component even if instance exit code = 0); "
+      + "ON_FAILURE (Only restart component if instance exit code != 0); "
+      + "NEVER (Do not restart in any cases)")
+  public RestartPolicyEnum getRestartPolicy() {
+    return restartPolicy;
+  }
+
+  public void setRestartPolicy(RestartPolicyEnum restartPolicy) {
+    this.restartPolicy = restartPolicy;
+  }
+
+
   /**
    * Name of the service component (mandatory).
    **/
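
Because toString() is annotated with @JsonValue and the field with @JsonProperty("restart_policy"), the enum travels as a plain string in REST payloads. A minimal Jackson sketch (writeValueAsString declares a checked exception, elided here):

  import com.fasterxml.jackson.databind.ObjectMapper;
  import org.apache.hadoop.yarn.service.api.records.Component;

  String json = new ObjectMapper().writeValueAsString(
      new Component().restartPolicy(Component.RestartPolicyEnum.NEVER));
  // json contains: "restart_policy":"NEVER"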

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
new file mode 100644
index 0000000..704ab14
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.component;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+
+/**
+ * Always restart policy: allows restarts for long lived components which
+ * never terminate.
+ */
+public final class AlwaysRestartPolicy implements ComponentRestartPolicy {
+
+  private static final AlwaysRestartPolicy INSTANCE =
+      new AlwaysRestartPolicy();
+
+  private AlwaysRestartPolicy() {
+  }
+
+  public static AlwaysRestartPolicy getInstance() {
+    return INSTANCE;
+  }
+
+  @Override public boolean isLongLived() {
+    return true;
+  }
+
+  /**
+   * Always false, since these components never terminate.
+   *
+   * @param component the component to check
+   * @return false, always
+   */
+  @Override public boolean hasCompleted(Component component) {
+    return false;
+  }
+
+  /**
+   * Always false, since these components never terminate.
+   *
+   * @param component the component to check
+   * @return false, always
+   */
+  @Override public boolean hasCompletedSuccessfully(Component component) {
+    return false;
+  }
+
+  @Override public boolean shouldRelaunchInstance(
+      ComponentInstance componentInstance, ContainerStatus containerStatus) {
+    return true;
+  }
+
+  @Override public boolean isReadyForDownStream(Component dependentComponent) {
+    if (dependentComponent.getNumReadyInstances() < dependentComponent
+        .getNumDesiredInstances()) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override public boolean allowUpgrades() {
+    return true;
+  }
+
+  @Override public boolean shouldTerminate(Component component) {
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index 7979c19..931877e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -18,9 +18,12 @@
 
 package org.apache.hadoop.yarn.service.component;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
+import static org.apache.hadoop.yarn.service.api.records.Component
+    .RestartPolicyEnum;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -111,6 +114,13 @@ public class Component implements EventHandler<ComponentEvent> {
   // The number of containers failed since last reset. This excludes preempted,
   // disk_failed containers etc. This will be reset to 0 periodically.
   public AtomicInteger currentContainerFailure = new AtomicInteger(0);
+
+  // Succeeded and failed instances are populated only for
+  // RestartPolicyEnum.ON_FAILURE/NEVER.
+  private Map<String, ComponentInstance> succeededInstances =
+      new ConcurrentHashMap<>();
+  private Map<String, ComponentInstance> failedInstances =
+      new ConcurrentHashMap<>();
   private boolean healthThresholdMonitorEnabled = false;
 
   private AtomicBoolean upgradeInProgress = new AtomicBoolean(false);
@@ -297,7 +307,7 @@ public class Component implements EventHandler<ComponentEvent> {
     @Override
     public ComponentState transition(Component component,
         ComponentEvent event) {
-      component.setDesiredContainers((int)event.getDesired());
+      component.setDesiredContainers((int) event.getDesired());
       if (!component.areDependenciesReady()) {
         LOG.info("[FLEX COMPONENT {}]: Flex deferred because dependencies not"
             + " satisfied.", component.getName());
@@ -402,11 +412,37 @@ public class Component implements EventHandler<ComponentEvent> {
     }
   }
 
-  private static ComponentState checkIfStable(Component component) {
+  @VisibleForTesting
+  static ComponentState checkIfStable(Component component) {
+    if (component.getRestartPolicyHandler().isLongLived()) {
+      return updateStateForLongRunningComponents(component);
+    } else {
+      //NEVER/ON_FAILURE
+      return updateStateForTerminatingComponents(component);
+    }
+  }
+
+  private static ComponentState updateStateForTerminatingComponents(
+      Component component) {
+    if (component.getNumRunningInstances() + component
+        .getNumSucceededInstances() + component.getNumFailedInstances()
+        < component.getComponentSpec().getNumberOfContainers()) {
+      component.componentSpec.setState(
+          org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+      return FLEXING;
+    } else {
+      component.componentSpec.setState(
+          org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+      return STABLE;
+    }
+  }
+
+  private static ComponentState updateStateForLongRunningComponents(
+      Component component) {
     // if desired == running
     if (component.componentMetrics.containersReady.value() == component
-        .getComponentSpec().getNumberOfContainers() &&
-        component.numContainersThatNeedUpgrade.get() == 0) {
+        .getComponentSpec().getNumberOfContainers()
+        && component.numContainersThatNeedUpgrade.get() == 0) {
       component.componentSpec.setState(
           org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
       return STABLE;
@@ -425,17 +461,41 @@ public class Component implements EventHandler<ComponentEvent> {
 
   // This method should be called whenever there is an increment or decrement
   // of a READY state container of a component
-  public static synchronized void checkAndUpdateComponentState(
+  // This should not matter for terminating components.
+  private static synchronized void checkAndUpdateComponentState(
       Component component, boolean isIncrement) {
     org.apache.hadoop.yarn.service.api.records.ComponentState curState =
         component.componentSpec.getState();
-    if (isIncrement) {
-      // check if all containers are in READY state
-      if (component.numContainersThatNeedUpgrade.get() == 0 &&
-          component.componentMetrics.containersReady.value() ==
-              component.componentMetrics.containersDesired.value()) {
-        component.componentSpec.setState(
-            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+
+    if (component.getRestartPolicyHandler().isLongLived()) {
+      if (isIncrement) {
+        // check if all containers are in READY state
+        if (component.numContainersThatNeedUpgrade.get() == 0
+            && component.componentMetrics.containersReady.value()
+            == component.componentMetrics.containersDesired.value()) {
+          component.componentSpec.setState(
+              org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+          if (curState != component.componentSpec.getState()) {
+            LOG.info("[COMPONENT {}] state changed from {} -> {}",
+                component.componentSpec.getName(), curState,
+                component.componentSpec.getState());
+          }
+          // component state change will trigger re-check of service state
+          component.context.getServiceManager().checkAndUpdateServiceState();
+        }
+      } else {
+        // container moving out of READY state could be because of FLEX down so
+        // still need to verify the count before changing the component state
+        if (component.componentMetrics.containersReady.value()
+            < component.componentMetrics.containersDesired.value()) {
+          component.componentSpec.setState(
+              org.apache.hadoop.yarn.service.api.records.ComponentState
+                  .FLEXING);
+        } else if (component.componentMetrics.containersReady.value()
+            == component.componentMetrics.containersDesired.value()) {
+          component.componentSpec.setState(
+              org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+        }
         if (curState != component.componentSpec.getState()) {
           LOG.info("[COMPONENT {}] state changed from {} -> {}",
               component.componentSpec.getName(), curState,
@@ -445,44 +505,38 @@ public class Component implements EventHandler<ComponentEvent> {
         component.context.getServiceManager().checkAndUpdateServiceState();
       }
     } else {
-      // container moving out of READY state could be because of FLEX down so
-      // still need to verify the count before changing the component state
-      if (component.componentMetrics.containersReady
-          .value() < component.componentMetrics.containersDesired.value()) {
-        component.componentSpec.setState(
-            org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
-      } else if (component.componentMetrics.containersReady
-          .value() == component.componentMetrics.containersDesired.value()) {
-        component.componentSpec.setState(
-            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
-      }
-      if (curState != component.componentSpec.getState()) {
-        LOG.info("[COMPONENT {}] state changed from {} -> {}",
-            component.componentSpec.getName(), curState,
-            component.componentSpec.getState());
-      }
       // component state change will trigger re-check of service state
       component.context.getServiceManager().checkAndUpdateServiceState();
     }
     // when the service is stable then the state of component needs to
     // transition to stable
-    component.dispatcher.getEventHandler().handle(new ComponentEvent(
-        component.getName(), ComponentEventType.CHECK_STABLE));
+    component.dispatcher.getEventHandler().handle(
+        new ComponentEvent(component.getName(),
+            ComponentEventType.CHECK_STABLE));
   }
 
   private static class ContainerCompletedTransition extends BaseTransition {
     @Override
     public void transition(Component component, ComponentEvent event) {
+
       component.updateMetrics(event.getStatus());
       component.dispatcher.getEventHandler().handle(
-          new ComponentInstanceEvent(event.getStatus().getContainerId(),
-              STOP).setStatus(event.getStatus()));
-      component.componentSpec.setState(
-          org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
-      if (component.context.service.getState().equals(ServiceState.STABLE)) {
-        component.getScheduler().getApp().setState(ServiceState.STARTED);
-        LOG.info("Service def state changed from {} -> {}",
-            ServiceState.STABLE, ServiceState.STARTED);
+          new ComponentInstanceEvent(event.getStatus().getContainerId(), STOP)
+              .setStatus(event.getStatus()));
+
+      ComponentRestartPolicy restartPolicy =
+          component.getRestartPolicyHandler();
+
+      if (restartPolicy.shouldRelaunchInstance(event.getInstance(),
+          event.getStatus())) {
+        component.componentSpec.setState(
+            org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+
+        if (component.context.service.getState().equals(ServiceState.STABLE)) {
+          component.getScheduler().getApp().setState(ServiceState.STARTED);
+          LOG.info("Service def state changed from {} -> {}",
+              ServiceState.STABLE, ServiceState.STARTED);
+        }
       }
     }
   }
@@ -725,8 +779,6 @@ public class Component implements EventHandler<ComponentEvent> {
     componentMetrics.containersDesired.set(n);
   }
 
-
-
   private void updateMetrics(ContainerStatus status) {
     switch (status.getExitStatus()) {
     case SUCCESS:
@@ -753,7 +805,7 @@ public class Component implements EventHandler<ComponentEvent> {
       String host = scheduler.getLiveInstances().get(status.getContainerId())
           .getNodeId().getHost();
       failureTracker.incNodeFailure(host);
-      currentContainerFailure.getAndIncrement() ;
+      currentContainerFailure.getAndIncrement();
     }
   }
 
@@ -763,17 +815,18 @@ public class Component implements EventHandler<ComponentEvent> {
       return true;
     }
     for (String dependency : dependencies) {
-      Component dependentComponent =
-          scheduler.getAllComponents().get(dependency);
+      Component dependentComponent = scheduler.getAllComponents().get(
+          dependency);
       if (dependentComponent == null) {
         LOG.error("Couldn't find dependency {} for {} (should never happen)",
             dependency, getName());
         continue;
       }
-      if (dependentComponent.getNumReadyInstances() < dependentComponent
-          .getNumDesiredInstances()) {
+
+      if (!dependentComponent.isReadyForDownstream()) {
         LOG.info("[COMPONENT {}]: Dependency {} not satisfied, only {} of {}"
-                + " instances are ready.", getName(), dependency,
+                + " instances are ready or the dependent component has not "
+                + "completed ", getName(), dependency,
             dependentComponent.getNumReadyInstances(),
             dependentComponent.getNumDesiredInstances());
         return false;
@@ -782,6 +835,7 @@ public class Component implements EventHandler<ComponentEvent> {
     return true;
   }
 
+
   public Map<String, String> getDependencyHostIpTokens() {
     Map<String, String> tokens = new HashMap<>();
     List<String> dependencies = componentSpec.getDependencies();
@@ -955,4 +1009,67 @@ public class Component implements EventHandler<ComponentEvent> {
       boolean healthThresholdMonitorEnabled) {
     this.healthThresholdMonitorEnabled = healthThresholdMonitorEnabled;
   }
+
+  public Collection<ComponentInstance> getSucceededInstances() {
+    return succeededInstances.values();
+  }
+
+  public long getNumSucceededInstances() {
+    return succeededInstances.size();
+  }
+
+  public long getNumFailedInstances() {
+    return failedInstances.size();
+  }
+
+  public Collection<ComponentInstance> getFailedInstances() {
+    return failedInstances.values();
+  }
+
+  public synchronized void markAsSucceeded(ComponentInstance instance) {
+    removeFailedInstanceIfExists(instance);
+    succeededInstances.put(instance.getCompInstanceName(), instance);
+  }
+
+  public synchronized void markAsFailed(ComponentInstance instance) {
+    removeSuccessfulInstanceIfExists(instance);
+    failedInstances.put(instance.getCompInstanceName(), instance);
+  }
+
+  public boolean removeFailedInstanceIfExists(ComponentInstance instance) {
+    if (failedInstances.containsKey(instance.getCompInstanceName())) {
+      failedInstances.remove(instance.getCompInstanceName());
+      return true;
+    }
+    return false;
+  }
+
+  public boolean removeSuccessfulInstanceIfExists(ComponentInstance instance) {
+    if (succeededInstances.containsKey(instance.getCompInstanceName())) {
+      succeededInstances.remove(instance.getCompInstanceName());
+      return true;
+    }
+    return false;
+  }
+
+  public boolean isReadyForDownstream() {
+    return getRestartPolicyHandler().isReadyForDownStream(this);
+  }
+
+  public static ComponentRestartPolicy getRestartPolicyHandler(
+      RestartPolicyEnum restartPolicyEnum) {
+
+    if (RestartPolicyEnum.NEVER == restartPolicyEnum) {
+      return NeverRestartPolicy.getInstance();
+    } else if (RestartPolicyEnum.ON_FAILURE == restartPolicyEnum) {
+      return OnFailureRestartPolicy.getInstance();
+    } else {
+      return AlwaysRestartPolicy.getInstance();
+    }
+  }
+
+  public ComponentRestartPolicy getRestartPolicyHandler() {
+    RestartPolicyEnum restartPolicyEnum = getComponentSpec().getRestartPolicy();
+    return getRestartPolicyHandler(restartPolicyEnum);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
new file mode 100644
index 0000000..23b0fb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.component;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+
+/**
+ * Interface for component restart policies, which are used to make
+ * decisions on the termination/restart of components and their instances.
+ */
+public interface ComponentRestartPolicy {
+
+  boolean isLongLived();
+
+  boolean hasCompleted(Component component);
+
+  boolean hasCompletedSuccessfully(Component component);
+
+  boolean shouldRelaunchInstance(ComponentInstance componentInstance,
+      ContainerStatus containerStatus);
+
+  boolean isReadyForDownStream(Component component);
+
+  boolean allowUpgrades();
+
+  boolean shouldTerminate(Component component);
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
new file mode 100644
index 0000000..ace1f89
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.component;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+
+/**
+ * Policy for components with instances that do not require/support a restart.
+ */
+public final class NeverRestartPolicy implements ComponentRestartPolicy {
+
+  private static final NeverRestartPolicy INSTANCE =
+      new NeverRestartPolicy();
+
+  private NeverRestartPolicy() {
+  }
+
+  public static NeverRestartPolicy getInstance() {
+    return INSTANCE;
+  }
+
+  @Override public boolean isLongLived() {
+    return false;
+  }
+
+  @Override public boolean hasCompleted(Component component) {
+    if (component.getNumSucceededInstances() + component.getNumFailedInstances()
+        < component.getNumDesiredInstances()) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override public boolean hasCompletedSuccessfully(Component component) {
+    if (component.getNumSucceededInstances() == component
+        .getNumDesiredInstances()) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override public boolean shouldRelaunchInstance(
+      ComponentInstance componentInstance, ContainerStatus containerStatus) {
+    return false;
+  }
+
+  @Override public boolean isReadyForDownStream(Component component) {
+    if (hasCompleted(component)) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override public boolean allowUpgrades() {
+    return false;
+  }
+
+  @Override public boolean shouldTerminate(Component component) {
+    long nSucceeded = component.getNumSucceededInstances();
+    long nFailed = component.getNumFailedInstances();
+    if (nSucceeded + nFailed < component.getComponentSpec()
+        .getNumberOfContainers()) {
+      return false;
+    }
+    return true;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java
new file mode 100644
index 0000000..39fba2a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/OnFailureRestartPolicy.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.component;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+
+/**
+ * Policy for components whose instances are restarted on failure.
+ */
+public final class OnFailureRestartPolicy implements ComponentRestartPolicy {
+
+  private static final OnFailureRestartPolicy INSTANCE =
+      new OnFailureRestartPolicy();
+
+  private OnFailureRestartPolicy() {
+  }
+
+  public static OnFailureRestartPolicy getInstance() {
+    return INSTANCE;
+  }
+
+  @Override public boolean isLongLived() {
+    return false;
+  }
+
+  @Override public boolean hasCompleted(Component component) {
+    if (hasCompletedSuccessfully(component)) {
+      return true;
+    }
+
+    return false;
+  }
+
+  @Override public boolean hasCompletedSuccessfully(Component component) {
+    if (component.getNumSucceededInstances() == component
+        .getNumDesiredInstances()) {
+      return true;
+    }
+
+    return false;
+  }
+
+  @Override public boolean shouldRelaunchInstance(
+      ComponentInstance componentInstance, ContainerStatus containerStatus) {
+
+    if (ComponentInstance.hasContainerFailed(containerStatus)) {
+      return true;
+    }
+
+    return false;
+  }
+
+  @Override public boolean isReadyForDownStream(Component component) {
+    if (hasCompletedSuccessfully(component)) {
+      return true;
+    }
+
+    return false;
+  }
+
+  @Override public boolean allowUpgrades() {
+    return false;
+  }
+
+  @Override public boolean shouldTerminate(Component component) {
+    long nSucceeded = component.getNumSucceededInstances();
+    if (nSucceeded < component.getComponentSpec().getNumberOfContainers()) {
+      return false;
+    }
+    return true;
+  }
+}
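
Side by side, the three policy singletons behave as follows (read directly from the implementations above; "desired" is the spec's numberOfContainers):

                          ALWAYS             ON_FAILURE              NEVER
  isLongLived             true               false                   false
  shouldRelaunchInstance  always             only on failed exit     never
  allowUpgrades           true               false                   false
  shouldTerminate         never              all desired succeeded   all desired finished
  isReadyForDownStream    ready >= desired   all succeeded           all finished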

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index a323649..529596d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.service.component.instance;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
@@ -25,9 +26,9 @@ import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
-import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -44,6 +45,7 @@ import org.apache.hadoop.yarn.service.api.records.ContainerState;
 import org.apache.hadoop.yarn.service.component.Component;
 import org.apache.hadoop.yarn.service.component.ComponentEvent;
 import org.apache.hadoop.yarn.service.component.ComponentEventType;
+import org.apache.hadoop.yarn.service.component.ComponentRestartPolicy;
 import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus;
 import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
 import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
@@ -96,8 +98,10 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
   // This container object is used for rest API query
   private org.apache.hadoop.yarn.service.api.records.Container containerSpec;
 
+
   private static final StateMachineFactory<ComponentInstance,
-      ComponentInstanceState, ComponentInstanceEventType, ComponentInstanceEvent>
+      ComponentInstanceState, ComponentInstanceEventType,
+      ComponentInstanceEvent>
       stateMachineFactory =
       new StateMachineFactory<ComponentInstance, ComponentInstanceState,
           ComponentInstanceEventType, ComponentInstanceEvent>(INIT)
@@ -230,6 +234,47 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     }
   }
 
+  @VisibleForTesting
+  static void handleComponentInstanceRelaunch(
+      ComponentInstance compInstance, ComponentInstanceEvent event) {
+    Component comp = compInstance.getComponent();
+
+    // Do we need to relaunch this instance's container?
+    boolean hasContainerFailed = hasContainerFailed(event.getStatus());
+
+    ComponentRestartPolicy restartPolicy = comp.getRestartPolicyHandler();
+
+    if (restartPolicy.shouldRelaunchInstance(compInstance, event.getStatus())) {
+      // re-ask the failed container.
+      comp.requestContainers(1);
+      comp.reInsertPendingInstance(compInstance);
+      LOG.info(compInstance.getCompInstanceId()
+              + ": {} completed. Reinsert back to pending list and requested " +
+              "a new container." + System.lineSeparator() +
+              " exitStatus={}, diagnostics={}.",
+          event.getContainerId(), event.getStatus().getExitStatus(),
+          event.getStatus().getDiagnostics());
+    } else {
+      // When not relaunching, update the component's #succeeded/#failed
+      // instance counts.
+      if (hasContainerFailed) {
+        comp.markAsFailed(compInstance);
+      } else {
+        comp.markAsSucceeded(compInstance);
+      }
+      LOG.info(compInstance.getCompInstanceId() + (!hasContainerFailed ?
+          " succeeded" :
+          " failed") + " without retry, status=" + event.getStatus());
+      comp.getScheduler().terminateServiceIfAllComponentsFinished();
+    }
+  }
+
+  public static boolean hasContainerFailed(ContainerStatus containerStatus) {
+    // A null status (exit status unavailable) also counts as a failure.
+    return containerStatus == null || containerStatus.getExitStatus() !=
+        ContainerExitStatus.SUCCESS;
+  }
+
   private static class ContainerStoppedTransition extends  BaseTransition {
     // whether the container failed before launched by AM or not.
     boolean failedBeforeLaunching = false;
@@ -244,9 +289,8 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     @Override
     public void transition(ComponentInstance compInstance,
         ComponentInstanceEvent event) {
-      // re-ask the failed container.
+
       Component comp = compInstance.component;
-      comp.requestContainers(1);
       String containerDiag =
           compInstance.getCompInstanceId() + ": " + event.getStatus()
               .getDiagnostics();
@@ -259,7 +303,10 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
         compInstance.component.decContainersReady(true);
       }
       compInstance.component.decRunningContainers();
-      boolean shouldExit = false;
+      // Should we fail (terminate) the service?
+      boolean shouldFailService = false;
+
+      final ServiceScheduler scheduler = comp.getScheduler();
       // Check if it exceeds the failure threshold, but only if health threshold
       // monitor is not enabled
       if (!comp.isHealthThresholdMonitorEnabled()
@@ -271,10 +318,10 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
             comp.getName(), comp.currentContainerFailure.get(), comp.maxContainerFailurePerComp);
         compInstance.diagnostics.append(exitDiag);
         // append to global diagnostics that will be reported to RM.
-        comp.getScheduler().getDiagnostics().append(containerDiag);
-        comp.getScheduler().getDiagnostics().append(exitDiag);
+        scheduler.getDiagnostics().append(containerDiag);
+        scheduler.getDiagnostics().append(exitDiag);
         LOG.warn(exitDiag);
-        shouldExit = true;
+        shouldFailService = true;
       }
 
       if (!failedBeforeLaunching) {
@@ -296,25 +343,14 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
       }
 
       // remove the failed ContainerId -> CompInstance mapping
-      comp.getScheduler().removeLiveCompInstance(event.getContainerId());
+      scheduler.removeLiveCompInstance(event.getContainerId());
 
-      comp.reInsertPendingInstance(compInstance);
+      // Based on the component restart policy, either relaunch the
+      // container or finish the service (once all components have finished).
+      handleComponentInstanceRelaunch(compInstance, event);
 
-      LOG.info(compInstance.getCompInstanceId()
-              + ": {} completed. Reinsert back to pending list and requested " +
-              "a new container." + System.lineSeparator() +
-              " exitStatus={}, diagnostics={}.",
-          event.getContainerId(), event.getStatus().getExitStatus(),
-          event.getStatus().getDiagnostics());
-      if (shouldExit) {
-        // Sleep for 5 seconds in hope that the state can be recorded in ATS.
-        // in case there's a client polling the comp state, it can be notified.
-        try {
-          Thread.sleep(5000);
-        } catch (InterruptedException e) {
-          LOG.error("Interrupted on sleep while exiting.", e);
-        }
-        ExitUtil.terminate(-1);
+      if (shouldFailService) {
+        scheduler.getTerminationHandler().terminate(-1);
       }
     }
   }
@@ -630,4 +666,9 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
         >>> 32));
     return result;
   }
+
+  @VisibleForTesting public org.apache.hadoop.yarn.service.api.records
+      .Container getContainerSpec() {
+    return containerSpec;
+  }
 }
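
The stop transition now funnels through handleComponentInstanceRelaunch():
relaunch when the restart policy says so, otherwise record a success or
failure and let the scheduler terminate the service once every component has
finished. A hedged sketch of that branch; RestartPolicy, the counters, and
the 0 == SUCCESS convention are simplified stand-ins, not the real API:

import java.util.concurrent.atomic.AtomicInteger;

public class RelaunchFlowSketch {
  interface RestartPolicy {
    boolean shouldRelaunchInstance(int exitStatus);
  }

  // Counters stand in for reInsertPendingInstance/markAsSucceeded/
  // markAsFailed on the real Component.
  static void onContainerStopped(RestartPolicy policy, int exitStatus,
      AtomicInteger pending, AtomicInteger succeeded, AtomicInteger failed) {
    if (policy.shouldRelaunchInstance(exitStatus)) {
      pending.incrementAndGet(); // re-ask a container, reinsert instance
    } else if (exitStatus == 0) { // assumption: 0 == SUCCESS
      succeeded.incrementAndGet();
    } else {
      failed.incrementAndGet();
    }
    // The real code would now call
    // terminateServiceIfAllComponentsFinished() on the scheduler.
  }

  public static void main(String[] args) {
    RestartPolicy onFailure = exitStatus -> exitStatus != 0;
    AtomicInteger pending = new AtomicInteger();
    AtomicInteger ok = new AtomicInteger();
    AtomicInteger bad = new AtomicInteger();
    onContainerStopped(onFailure, 0, pending, ok, bad);    // success path
    onContainerStopped(onFailure, -104, pending, ok, bad); // relaunch path
    System.out.println(pending + " " + ok + " " + bad);    // prints 1 1 0
  }
}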

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
index 915b836..707bbf0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
@@ -571,4 +572,21 @@ public final class ServiceUtils {
     // Fallback to querying the default hostname as we did before.
     return InetAddress.getLocalHost().getCanonicalHostName();
   }
+
+  /**
+   * Process termination handler - exits with the specified exit code after
+   * waiting a while for the ATS state to be in sync.
+   */
+  public static class ProcessTerminationHandler {
+    public void terminate(int exitCode) {
+      // Sleep for 5 seconds in the hope that the state can be recorded in
+      // ATS, so that any client polling the component state is notified.
+      try {
+        Thread.sleep(5000);
+      } catch (InterruptedException e) {
+        log.info("Interrupted on sleep while exiting.", e);
+      }
+      ExitUtil.terminate(exitCode);
+    }
+  }
 }
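
Because termination is now an overridable method on a handler object rather
than a direct ExitUtil.terminate() call, tests can substitute a no-op
handler. A sketch of such a test double -- RecordingTerminationHandler is
illustrative, not part of this patch, and assumes the scheduler lets tests
inject the handler:

import org.apache.hadoop.yarn.service.utils.ServiceUtils;

public class RecordingTerminationHandler
    extends ServiceUtils.ProcessTerminationHandler {
  // Recorded instead of exiting the JVM, so a test can assert on it.
  volatile Integer lastExitCode;

  @Override
  public void terminate(int exitCode) {
    // Skip the 5-second ATS sleep and the real ExitUtil.terminate here.
    lastExitCode = exitCode;
  }
}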

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
index 599b8a7..86b4cea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
@@ -57,6 +57,8 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.net.URL;
 import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM;
@@ -99,8 +101,32 @@ public class ServiceTestUtils {
     return exampleApp;
   }
 
+  // Example service definition:
+  // 3 components, each of which has 2 containers.
+  public static Service createTerminatingJobExample(String serviceName) {
+    Service exampleApp = new Service();
+    exampleApp.setName(serviceName);
+    exampleApp.setVersion("v1");
+    exampleApp.addComponent(
+        createComponent("terminating-comp1", 2, "sleep " + "1000",
+            Component.RestartPolicyEnum.NEVER, null));
+    exampleApp.addComponent(
+        createComponent("terminating-comp2", 2, "sleep 1000",
+            Component.RestartPolicyEnum.ON_FAILURE, new ArrayList<String>() {{
+                add("terminating-comp1");
+            }}));
+    exampleApp.addComponent(
+        createComponent("terminating-comp3", 2, "sleep 1000",
+            Component.RestartPolicyEnum.ON_FAILURE, new ArrayList<String>() {{
+                add("terminating-comp2");
+            }}));
+
+    return exampleApp;
+  }
+
   public static Component createComponent(String name) {
-    return createComponent(name, 2L, "sleep 1000");
+    return createComponent(name, 2L, "sleep 1000",
+        Component.RestartPolicyEnum.ALWAYS, null);
   }
 
   protected static Component createComponent(String name, long numContainers,
@@ -116,6 +142,18 @@ public class ServiceTestUtils {
     return comp1;
   }
 
+  protected static Component createComponent(String name, long numContainers,
+      String command, Component.RestartPolicyEnum restartPolicyEnum,
+      List<String> dependencies) {
+    Component comp = createComponent(name, numContainers, command);
+    comp.setRestartPolicy(restartPolicyEnum);
+
+    if (dependencies != null) {
+      comp.dependencies(dependencies);
+    }
+    return comp;
+  }
+
   public static SliderFileSystem initMockFs() throws IOException {
     return initMockFs(null);
   }
@@ -306,6 +344,12 @@ public class ServiceTestUtils {
     return client;
   }
 
+  public static ServiceManager createServiceManager(ServiceContext context) {
+    ServiceManager serviceManager = new ServiceManager(context);
+    context.setServiceManager(serviceManager);
+    return serviceManager;
+  }
+
   /**
    * Creates a YarnClient for test purposes.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceManager.java
index 56a0c71..fc509f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceManager.java
@@ -227,14 +227,16 @@ public class TestServiceManager {
   }
 
   public static Service createBaseDef(String name) {
+    return createDef(name, ServiceTestUtils.createExampleApplication());
+  }
+
+  public static Service createDef(String name, Service serviceDef) {
     ApplicationId applicationId = ApplicationId.newInstance(
         System.currentTimeMillis(), 1);
-    Service serviceDef = ServiceTestUtils.createExampleApplication();
     serviceDef.setId(applicationId.toString());
     serviceDef.setName(name);
     serviceDef.setState(ServiceState.STARTED);
     Artifact artifact = createTestArtifact("1");
-
     serviceDef.getComponents().forEach(component ->
         component.setArtifact(artifact));
     return serviceDef;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
index 600e438..d7c15ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
@@ -38,8 +38,10 @@ import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType;
+
 import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService;
 import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
+import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -50,6 +52,7 @@ import java.util.Iterator;
 import java.util.Map;
 
 import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.STOP;
+
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.mock;
@@ -60,6 +63,9 @@ import static org.mockito.Mockito.when;
  */
 public class TestComponent {
 
+  private static final int WAIT_MS_PER_LOOP = 1000;
+  static final Logger LOG = Logger.getLogger(TestComponent.class);
+
   @Rule
   public ServiceTestUtils.ServiceFSWatcher rule =
       new ServiceTestUtils.ServiceFSWatcher();
@@ -158,6 +164,57 @@ public class TestComponent {
         comp.getComponentSpec().getConfiguration().getEnv("key1"));
   }
 
+  @Test
+  public void testComponentStateUpdatesWithTerminatingComponents() throws
+      Exception {
+    final String serviceName =
+        "testComponentStateUpdatesWithTerminatingComponents";
+
+    Service testService = ServiceTestUtils.createTerminatingJobExample(
+        serviceName);
+    TestServiceManager.createDef(serviceName, testService);
+
+    ServiceContext context = createTestContext(rule, testService);
+
+    for (Component comp : context.scheduler.getAllComponents().values()) {
+
+      Iterator<ComponentInstance> instanceIter = comp.
+          getAllComponentInstances().iterator();
+
+      ComponentInstance componentInstance = instanceIter.next();
+      Container instanceContainer = componentInstance.getContainer();
+
+      Assert.assertEquals(0, comp.getNumSucceededInstances());
+      Assert.assertEquals(0, comp.getNumFailedInstances());
+      Assert.assertEquals(2, comp.getNumRunningInstances());
+      Assert.assertEquals(2, comp.getNumReadyInstances());
+      Assert.assertEquals(0, comp.getPendingInstances().size());
+
+      // stop one container
+      ContainerStatus containerStatus = ContainerStatus.newInstance(
+          instanceContainer.getId(),
+          org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE,
+          "successful", 0);
+      comp.handle(new ComponentEvent(comp.getName(),
+          ComponentEventType.CONTAINER_COMPLETED).setStatus(containerStatus));
+      componentInstance.handle(
+          new ComponentInstanceEvent(componentInstance.getContainer().getId(),
+              ComponentInstanceEventType.STOP).setStatus(containerStatus));
+
+      Assert.assertEquals(1, comp.getNumSucceededInstances());
+      Assert.assertEquals(0, comp.getNumFailedInstances());
+      Assert.assertEquals(1, comp.getNumRunningInstances());
+      Assert.assertEquals(1, comp.getNumReadyInstances());
+      Assert.assertEquals(0, comp.getPendingInstances().size());
+
+      org.apache.hadoop.yarn.service.component.ComponentState componentState =
+          Component.checkIfStable(comp);
+      Assert.assertEquals(
+          org.apache.hadoop.yarn.service.component.ComponentState.STABLE,
+          componentState);
+    }
+  }
+
   private static org.apache.hadoop.yarn.service.api.records.Component
       createSpecWithEnv(String serviceName, String compName, String key,
       String val) {
@@ -171,31 +228,38 @@ public class TestComponent {
   public static ServiceContext createTestContext(
       ServiceTestUtils.ServiceFSWatcher fsWatcher, String serviceName)
       throws Exception {
+    return createTestContext(fsWatcher,
+        TestServiceManager.createBaseDef(serviceName));
+  }
+
+  public static ServiceContext createTestContext(
+      ServiceTestUtils.ServiceFSWatcher fsWatcher, Service serviceDef)
+      throws Exception {
     ServiceContext context = new ServiceContext();
-    context.service = TestServiceManager.createBaseDef(serviceName);
+    context.service = serviceDef;
     context.fs = fsWatcher.getFs();
 
     ContainerLaunchService mockLaunchService = mock(
         ContainerLaunchService.class);
 
     context.scheduler = new ServiceScheduler(context) {
-      @Override
-      protected YarnRegistryViewForProviders createYarnRegistryOperations(
+      @Override protected YarnRegistryViewForProviders
+      createYarnRegistryOperations(
           ServiceContext context, RegistryOperations registryClient) {
         return mock(YarnRegistryViewForProviders.class);
       }
 
-      @Override
-      public NMClientAsync createNMClient() {
+      @Override public NMClientAsync createNMClient() {
         NMClientAsync nmClientAsync = super.createNMClient();
         NMClient nmClient = mock(NMClient.class);
         try {
           when(nmClient.getContainerStatus(anyObject(), anyObject()))
-              .thenAnswer((Answer<ContainerStatus>) invocation ->
-                  ContainerStatus.newInstance(
-                      (ContainerId) invocation.getArguments()[0],
-                      org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
-                      "", 0));
+              .thenAnswer(
+                  (Answer<ContainerStatus>) invocation -> ContainerStatus
+                      .newInstance((ContainerId) invocation.getArguments()[0],
+                          org.apache.hadoop.yarn.api.records.ContainerState
+                              .RUNNING,
+                          "", 0));
         } catch (YarnException | IOException e) {
           throw new RuntimeException(e);
         }
@@ -203,16 +267,18 @@ public class TestComponent {
         return nmClientAsync;
       }
 
-      @Override
-      public ContainerLaunchService getContainerLaunchService() {
+      @Override public ContainerLaunchService getContainerLaunchService() {
         return mockLaunchService;
       }
     };
     context.scheduler.init(fsWatcher.getConf());
 
+    ServiceTestUtils.createServiceManager(context);
+
     doNothing().when(mockLaunchService).
         reInitCompInstance(anyObject(), anyObject(), anyObject(), anyObject());
     stabilizeComponents(context);
+
     return context;
   }
 
@@ -223,6 +289,8 @@ public class TestComponent {
     context.attemptId = attemptId;
     Map<String, Component>
         componentState = context.scheduler.getAllComponents();
+
+    int counter = 0;
     for (org.apache.hadoop.yarn.service.api.records.Component componentSpec :
         context.service.getComponents()) {
       Component component = new org.apache.hadoop.yarn.service.component.
@@ -230,9 +298,12 @@ public class TestComponent {
       componentState.put(component.getName(), component);
       component.handle(new ComponentEvent(component.getName(),
           ComponentEventType.FLEX));
+
       for (int i = 0; i < componentSpec.getNumberOfContainers(); i++) {
-        assignNewContainer(attemptId, i + 1, context, component);
+        counter++;
+        assignNewContainer(attemptId, counter, context, component);
       }
+
       component.handle(new ComponentEvent(component.getName(),
           ComponentEventType.CHECK_STABLE));
     }
@@ -241,6 +312,8 @@ public class TestComponent {
   private static void assignNewContainer(
       ApplicationAttemptId attemptId, long containerNum,
       ServiceContext context, Component component) {
+
+
     Container container = org.apache.hadoop.yarn.api.records.Container
         .newInstance(ContainerId.newContainerId(attemptId, containerNum),
             NODE_ID, "localhost", null, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponentRestartPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponentRestartPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponentRestartPolicy.java
new file mode 100644
index 0000000..60f5c91
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponentRestartPolicy.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.component;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for ComponentRestartPolicy implementations.
+ */
+public class TestComponentRestartPolicy {
+
+  @Test
+  public void testAlwaysRestartPolicy() throws Exception {
+
+    AlwaysRestartPolicy alwaysRestartPolicy = AlwaysRestartPolicy.getInstance();
+
+    Component component = mock(Component.class);
+    when(component.getNumReadyInstances()).thenReturn(1);
+    when(component.getNumDesiredInstances()).thenReturn(2);
+
+    ComponentInstance instance = mock(ComponentInstance.class);
+    when(instance.getComponent()).thenReturn(component);
+
+    ContainerStatus containerStatus = mock(ContainerStatus.class);
+
+    assertTrue(alwaysRestartPolicy.isLongLived());
+    assertTrue(alwaysRestartPolicy.allowUpgrades());
+    assertFalse(alwaysRestartPolicy.hasCompleted(component));
+    assertFalse(alwaysRestartPolicy.hasCompletedSuccessfully(component));
+
+    assertTrue(
+        alwaysRestartPolicy.shouldRelaunchInstance(instance, containerStatus));
+
+    assertFalse(alwaysRestartPolicy.isReadyForDownStream(component));
+  }
+
+  @Test
+  public void testNeverRestartPolicy() throws Exception {
+
+    NeverRestartPolicy restartPolicy = NeverRestartPolicy.getInstance();
+
+    Component component = mock(Component.class);
+    when(component.getNumSucceededInstances()).thenReturn(1L);
+    when(component.getNumFailedInstances()).thenReturn(2L);
+    when(component.getNumDesiredInstances()).thenReturn(3);
+
+    ComponentInstance instance = mock(ComponentInstance.class);
+    when(instance.getComponent()).thenReturn(component);
+
+    ContainerStatus containerStatus = mock(ContainerStatus.class);
+
+    assertFalse(restartPolicy.isLongLived());
+    assertFalse(restartPolicy.allowUpgrades());
+    assertTrue(restartPolicy.hasCompleted(component));
+    assertFalse(restartPolicy.hasCompletedSuccessfully(component));
+
+    assertFalse(
+        restartPolicy.shouldRelaunchInstance(instance, containerStatus));
+
+    assertTrue(restartPolicy.isReadyForDownStream(component));
+  }
+
+  @Test
+  public void testOnFailureRestartPolicy() throws Exception {
+
+    OnFailureRestartPolicy restartPolicy = OnFailureRestartPolicy.getInstance();
+
+    Component component = mock(Component.class);
+    when(component.getNumSucceededInstances()).thenReturn(3L);
+    when(component.getNumFailedInstances()).thenReturn(0L);
+    when(component.getNumDesiredInstances()).thenReturn(3);
+
+    ComponentInstance instance = mock(ComponentInstance.class);
+    when(instance.getComponent()).thenReturn(component);
+
+    ContainerStatus containerStatus = mock(ContainerStatus.class);
+    when(containerStatus.getExitStatus()).thenReturn(0);
+
+    assertFalse(restartPolicy.isLongLived());
+    assertFalse(restartPolicy.allowUpgrades());
+    assertTrue(restartPolicy.hasCompleted(component));
+    assertTrue(restartPolicy.hasCompletedSuccessfully(component));
+
+    assertFalse(
+        restartPolicy.shouldRelaunchInstance(instance, containerStatus));
+
+    assertTrue(restartPolicy.isReadyForDownStream(component));
+
+    when(component.getNumSucceededInstances()).thenReturn(2L);
+    when(component.getNumFailedInstances()).thenReturn(1L);
+    when(component.getNumDesiredInstances()).thenReturn(3);
+
+    assertFalse(restartPolicy.hasCompleted(component));
+    assertFalse(restartPolicy.hasCompletedSuccessfully(component));
+
+    when(containerStatus.getExitStatus()).thenReturn(-1000);
+
+    assertTrue(
+        restartPolicy.shouldRelaunchInstance(instance, containerStatus));
+
+    assertFalse(restartPolicy.isReadyForDownStream(component));
+  }
+}




[25/50] [abbrv] hadoop git commit: YARN-7530. Refactored YARN service API project location. Contributed by Chandni Singh

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
new file mode 100644
index 0000000..f9cfa92
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
@@ -0,0 +1,391 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.client;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.service.SystemServiceManager;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser;
+
+/**
+ * SystemServiceManager implementation.
+ * Scans the configured system service path.
+ *
+ * The service path structure is as follows:
+ * SYSTEM_SERVICE_DIR_PATH
+ * |---- sync
+ * |     |--- user1
+ * |     |    |---- service1.yarnfile
+ * |     |    |---- service2.yarnfile
+ * |     |--- user2
+ * |     |    |---- service1.yarnfile
+ * |     |    ....
+ * |     |
+ * |---- async
+ * |     |--- user3
+ * |     |    |---- service1.yarnfile
+ * |     |    |---- service2.yarnfile
+ * |     |--- user4
+ * |     |    |---- service1.yarnfile
+ * |     |    ....
+ * |     |
+ *
+ * sync: these services are launched synchronously at service start,
+ *       i.e. a blocking service start.
+ * async: these services are launched in a separate thread immediately
+ *       after service start, i.e. a non-blocking service start.
+ */
+public class SystemServiceManagerImpl extends AbstractService
+    implements SystemServiceManager {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SystemServiceManagerImpl.class);
+
+  private static final String YARN_FILE_SUFFIX = ".yarnfile";
+  private static final String SYNC = "sync";
+  private static final String ASYNC = "async";
+
+  private FileSystem fs;
+  private Path systemServiceDir;
+  private AtomicBoolean stopExecutors = new AtomicBoolean(false);
+  private Map<String, Set<Service>> syncUserServices = new HashMap<>();
+  private Map<String, Set<Service>> asyncUserServices = new HashMap<>();
+  private UserGroupInformation loginUGI;
+  private Thread serviceLauncher;
+
+  @VisibleForTesting
+  private int badFileNameExtensionSkipCounter;
+  @VisibleForTesting
+  private Map<String, Integer> ignoredUserServices =
+      new HashMap<>();
+  @VisibleForTesting
+  private int badDirSkipCounter;
+
+  public SystemServiceManagerImpl() {
+    super(SystemServiceManagerImpl.class.getName());
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    String dirPath =
+        conf.get(YarnServiceConf.YARN_SERVICES_SYSTEM_SERVICE_DIRECTORY);
+    if (dirPath != null) {
+      systemServiceDir = new Path(dirPath);
+      LOG.info("System Service Directory is configured to {}",
+          systemServiceDir);
+      fs = systemServiceDir.getFileSystem(conf);
+      this.loginUGI = UserGroupInformation.isSecurityEnabled() ?
+          UserGroupInformation.getLoginUser() :
+          UserGroupInformation.getCurrentUser();
+      LOG.info("UserGroupInformation initialized to {}", loginUGI);
+    }
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    scanForUserServices();
+    launchUserService(syncUserServices);
+    // Create a thread and submit services in the background, otherwise
+    // they would block and slow down an RM switch-over.
+    serviceLauncher = new Thread(createRunnable());
+    serviceLauncher.setName("System service launcher");
+    serviceLauncher.start();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    LOG.info("Stopping {}", getName());
+    stopExecutors.set(true);
+
+    if (serviceLauncher != null) {
+      serviceLauncher.interrupt();
+      try {
+        serviceLauncher.join();
+      } catch (InterruptedException ie) {
+        LOG.warn("Interrupted Exception while stopping", ie);
+      }
+    }
+  }
+
+  private Runnable createRunnable() {
+    return new Runnable() {
+      @Override
+      public void run() {
+        launchUserService(asyncUserServices);
+      }
+    };
+  }
+
+  void launchUserService(Map<String, Set<Service>> userServices) {
+    for (Map.Entry<String, Set<Service>> entry : userServices.entrySet()) {
+      String user = entry.getKey();
+      Set<Service> services = entry.getValue();
+      if (services.isEmpty()) {
+        continue;
+      }
+      ServiceClient serviceClient = null;
+      try {
+        UserGroupInformation userUgi = getProxyUser(user);
+        serviceClient = createServiceClient(userUgi);
+        for (Service service : services) {
+          LOG.info("POST: createService = {} user = {}", service, userUgi);
+          try {
+            launchServices(userUgi, serviceClient, service);
+          } catch (IOException | UndeclaredThrowableException e) {
+            if (e.getCause() != null) {
+              LOG.warn(e.getCause().getMessage());
+            } else {
+              String message =
+                  "Failed to create service " + service.getName() + " : ";
+              LOG.error(message, e);
+            }
+          }
+        }
+      } catch (InterruptedException e) {
+        LOG.warn("System service launcher thread interrupted", e);
+        break;
+      } catch (Exception e) {
+        LOG.error("Error while submitting services for user " + user, e);
+      } finally {
+        if (serviceClient != null) {
+          try {
+            serviceClient.close();
+          } catch (IOException e) {
+            LOG.warn("Error while closing serviceClient for user {}", user);
+          }
+        }
+      }
+    }
+  }
+
+  private ServiceClient createServiceClient(UserGroupInformation userUgi)
+      throws IOException, InterruptedException {
+    ServiceClient serviceClient =
+        userUgi.doAs(new PrivilegedExceptionAction<ServiceClient>() {
+          @Override public ServiceClient run()
+              throws IOException, YarnException {
+            ServiceClient sc = getServiceClient();
+            sc.init(getConfig());
+            sc.start();
+            return sc;
+          }
+        });
+    return serviceClient;
+  }
+
+  private void launchServices(UserGroupInformation userUgi,
+      ServiceClient serviceClient, Service service)
+      throws IOException, InterruptedException {
+    if (service.getState() == ServiceState.STOPPED) {
+      userUgi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override public Void run() throws IOException, YarnException {
+          serviceClient.actionBuild(service);
+          return null;
+        }
+      });
+      LOG.info("Service {} version {} saved.", service.getName(),
+          service.getVersion());
+    } else {
+      ApplicationId applicationId =
+          userUgi.doAs(new PrivilegedExceptionAction<ApplicationId>() {
+            @Override public ApplicationId run()
+                throws IOException, YarnException {
+              ApplicationId applicationId = serviceClient.actionCreate(service);
+              return applicationId;
+            }
+          });
+      LOG.info("Service {} submitted with Application ID: {}",
+          service.getName(), applicationId);
+    }
+  }
+
+  ServiceClient getServiceClient() {
+    return new ServiceClient();
+  }
+
+  private UserGroupInformation getProxyUser(String user) {
+    UserGroupInformation ugi;
+    if (UserGroupInformation.isSecurityEnabled()) {
+      ugi = UserGroupInformation.createProxyUser(user, loginUGI);
+    } else {
+      ugi = UserGroupInformation.createRemoteUser(user);
+    }
+    return ugi;
+  }
+
+  // Scan for both service launch types, i.e. sync and async.
+  void scanForUserServices() throws IOException {
+    if (systemServiceDir == null) {
+      return;
+    }
+    try {
+      LOG.info("Scan for launch type on {}", systemServiceDir);
+      RemoteIterator<FileStatus> iterLaunchType = list(systemServiceDir);
+      while (iterLaunchType.hasNext()) {
+        FileStatus launchType = iterLaunchType.next();
+        if (!launchType.isDirectory()) {
+          LOG.debug("Scanner skips for unknown file {}", launchType.getPath());
+          continue;
+        }
+        if (launchType.getPath().getName().equals(SYNC)) {
+          scanForUserServiceDefinition(launchType.getPath(), syncUserServices);
+        } else if (launchType.getPath().getName().equals(ASYNC)) {
+          scanForUserServiceDefinition(launchType.getPath(), asyncUserServices);
+        } else {
+          badDirSkipCounter++;
+          LOG.debug("Scanner skips for unknown dir {}.", launchType.getPath());
+        }
+      }
+    } catch (FileNotFoundException e) {
+      LOG.warn("System service directory {} doesn't not exist.",
+          systemServiceDir);
+    }
+  }
+
+  // Files are under systemServiceDir/<users>. Scan for 2 levels
+  // 1st level for users
+  // 2nd level for service definitions under user
+  private void scanForUserServiceDefinition(Path userDirPath,
+      Map<String, Set<Service>> userServices) throws IOException {
+    LOG.info("Scan for users on {}", userDirPath);
+    RemoteIterator<FileStatus> iterUsers = list(userDirPath);
+    while (iterUsers.hasNext()) {
+      FileStatus userDir = iterUsers.next();
+      // if the 1st level is not a user directory then skip it.
+      if (!userDir.isDirectory()) {
+        LOG.info(
+            "Service definition {} doesn't belong to any user. Ignoring.",
+            userDir.getPath().getName());
+        continue;
+      }
+      String userName = userDir.getPath().getName();
+      LOG.info("Scanning service definitions for user {}.", userName);
+
+      //2nd level scan
+      RemoteIterator<FileStatus> iterServices = list(userDir.getPath());
+      while (iterServices.hasNext()) {
+        FileStatus serviceCache = iterServices.next();
+        String filename = serviceCache.getPath().getName();
+        if (!serviceCache.isFile()) {
+          LOG.info("Scanner skips for unknown dir {}", filename);
+          continue;
+        }
+        if (!filename.endsWith(YARN_FILE_SUFFIX)) {
+          LOG.info("Scanner skips for unknown file extension, filename = {}",
+              filename);
+          badFileNameExtensionSkipCounter++;
+          continue;
+        }
+        Service service = getServiceDefinition(serviceCache.getPath());
+        if (service != null) {
+          Set<Service> services = userServices.get(userName);
+          if (services == null) {
+            services = new HashSet<>();
+            userServices.put(userName, services);
+          }
+          if (!services.add(service)) {
+            int count = ignoredUserServices.getOrDefault(userName, 0);
+            ignoredUserServices.put(userName, count + 1);
+            LOG.warn(
+                "Ignoring service {} for the user {} as it is already present,"
+                    + " filename = {}", service.getName(), userName, filename);
+          } else {
+            LOG.info("Added service {} for the user {}, filename = {}",
+                service.getName(), userName, filename);
+          }
+        }
+      }
+    }
+  }
+
+  private Service getServiceDefinition(Path filePath) {
+    Service service = null;
+    try {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Loading service definition from FS: " + filePath);
+      }
+      service = jsonSerDeser.load(fs, filePath);
+    } catch (IOException e) {
+      LOG.info("Error while loading service definition from FS: {}", e);
+    }
+    return service;
+  }
+
+  private RemoteIterator<FileStatus> list(Path path) throws IOException {
+    return new StoppableRemoteIterator(fs.listStatusIterator(path));
+  }
+
+  @VisibleForTesting Map<String, Integer> getIgnoredUserServices() {
+    return ignoredUserServices;
+  }
+
+  private class StoppableRemoteIterator implements RemoteIterator<FileStatus> {
+    private final RemoteIterator<FileStatus> remote;
+
+    StoppableRemoteIterator(RemoteIterator<FileStatus> remote) {
+      this.remote = remote;
+    }
+
+    @Override public boolean hasNext() throws IOException {
+      return !stopExecutors.get() && remote.hasNext();
+    }
+
+    @Override public FileStatus next() throws IOException {
+      return remote.next();
+    }
+  }
+
+  @VisibleForTesting
+  Map<String, Set<Service>> getSyncUserServices() {
+    return syncUserServices;
+  }
+
+  @VisibleForTesting
+  int getBadFileNameExtensionSkipCounter() {
+    return badFileNameExtensionSkipCounter;
+  }
+
+  @VisibleForTesting
+  int getBadDirSkipCounter() {
+    return badDirSkipCounter;
+  }
+}
\ No newline at end of file
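
For illustration, one way to seed the layout that scanForUserServices()
expects, using the plain FileSystem API. The base path, user names, and the
local yarnfile are assumptions for the example; only the sync/async
subdirectories and the .yarnfile suffix come from the code above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeedSystemServicesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed base dir; in practice this is whatever
    // YarnServiceConf.YARN_SERVICES_SYSTEM_SERVICE_DIRECTORY points to.
    Path base = new Path("/services");
    FileSystem fs = base.getFileSystem(conf);
    // sync services block serviceStart(); async ones launch in a thread.
    fs.mkdirs(new Path(base, "sync/user1"));
    fs.mkdirs(new Path(base, "async/user2"));
    // Only regular files ending in .yarnfile under a user directory are
    // loaded; anything else bumps the scanner's skip counters.
    fs.copyFromLocalFile(new Path("service1.yarnfile"),
        new Path(base, "sync/user1/service1.yarnfile"));
  }
}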

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java
new file mode 100644
index 0000000..cf5ce11
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.service.client contains classes
+ * for YARN Services Client API.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.service.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
new file mode 100644
index 0000000..46c9abe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -0,0 +1,818 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.webapp;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.ComponentState;
+import org.apache.hadoop.yarn.service.api.records.Container;
+import org.apache.hadoop.yarn.service.api.records.ContainerState;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
+import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
+import org.apache.hadoop.yarn.service.client.ServiceClient;
+import org.apache.hadoop.yarn.service.conf.RestApiConstants;
+import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.yarn.service.api.records.ServiceState.ACCEPTED;
+import static org.apache.hadoop.yarn.service.conf.RestApiConstants.*;
+import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.*;
+
+/**
+ * The rest API endpoints for users to manage services on YARN.
+ */
+@Singleton
+@Path(CONTEXT_ROOT)
+public class ApiServer {
+
+  public ApiServer() {
+    super();
+  }
+  
+  @Inject
+  public ApiServer(Configuration conf) {
+    super();
+  }
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ApiServer.class);
+  private static Configuration YARN_CONFIG = new YarnConfiguration();
+  private ServiceClient serviceClientUnitTest;
+  private boolean unitTest = false;
+
+  static {
+    init();
+  }
+
+  // initialize all the common resources - order is important
+  private static void init() {
+  }
+
+  @GET
+  @Path(VERSION)
+  @Consumes({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
+  public Response getVersion() {
+    String version = VersionInfo.getBuildVersion();
+    LOG.info(version);
+    return Response.ok("{ \"hadoop_version\": \"" + version + "\"}").build();
+  }
+
+  @POST
+  @Path(SERVICE_ROOT_PATH)
+  @Consumes({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
+  public Response createService(@Context HttpServletRequest request,
+      Service service) {
+    ServiceStatus serviceStatus = new ServiceStatus();
+    try {
+      UserGroupInformation ugi = getProxyUser(request);
+      LOG.info("POST: createService = {} user = {}", service, ugi);
+      if (service.getState() == ServiceState.STOPPED) {
+        ugi.doAs(new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws YarnException, IOException {
+            ServiceClient sc = getServiceClient();
+            sc.init(YARN_CONFIG);
+            sc.start();
+            sc.actionBuild(service);
+            sc.close();
+            return null;
+          }
+        });
+        serviceStatus.setDiagnostics("Service " + service.getName() +
+            " version " + service.getVersion() + " saved.");
+      } else {
+        ApplicationId applicationId = ugi
+            .doAs(new PrivilegedExceptionAction<ApplicationId>() {
+              @Override
+              public ApplicationId run() throws IOException, YarnException {
+                ServiceClient sc = getServiceClient();
+                sc.init(YARN_CONFIG);
+                sc.start();
+                ApplicationId applicationId = sc.actionCreate(service);
+                sc.close();
+                return applicationId;
+              }
+            });
+        serviceStatus.setDiagnostics("Application ID: " + applicationId);
+      }
+      serviceStatus.setState(ACCEPTED);
+      serviceStatus.setUri(
+          CONTEXT_ROOT + SERVICE_ROOT_PATH + "/" + service
+              .getName());
+      return formatResponse(Status.ACCEPTED, serviceStatus);
+    } catch (AccessControlException e) {
+      serviceStatus.setDiagnostics(e.getMessage());
+      return formatResponse(Status.FORBIDDEN, e.getCause().getMessage());
+    } catch (IllegalArgumentException e) {
+      return formatResponse(Status.BAD_REQUEST, e.getMessage());
+    } catch (IOException | InterruptedException e) {
+      String message = "Failed to create service " + service.getName()
+          + ": {}";
+      LOG.error(message, e);
+      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
+    } catch (UndeclaredThrowableException e) {
+      String message = "Failed to create service " + service.getName()
+          + ": {}";
+      LOG.error(message, e);
+      if (e.getCause().getMessage().contains("already exists")) {
+        message = "Service name " + service.getName() + " is already taken.";
+      } else {
+        message = e.getCause().getMessage();
+      }
+      return formatResponse(Status.INTERNAL_SERVER_ERROR,
+          message);
+    }
+  }
+
+  @GET
+  @Path(SERVICE_PATH)
+  @Consumes({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
+  public Response getService(@Context HttpServletRequest request,
+      @PathParam(SERVICE_NAME) String appName) {
+    ServiceStatus serviceStatus = new ServiceStatus();
+    try {
+      if (appName == null) {
+        throw new IllegalArgumentException("Service name cannot be null.");
+      }
+      UserGroupInformation ugi = getProxyUser(request);
+      LOG.info("GET: getService for appName = {} user = {}", appName, ugi);
+      Service app = getServiceFromClient(ugi, appName);
+      return Response.ok(app).build();
+    } catch (AccessControlException e) {
+      return formatResponse(Status.FORBIDDEN, e.getMessage());
+    } catch (IllegalArgumentException e) {
+      serviceStatus.setDiagnostics(e.getMessage());
+      serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
+      return Response.status(Status.NOT_FOUND).entity(serviceStatus)
+          .build();
+    } catch (FileNotFoundException e) {
+      serviceStatus.setDiagnostics("Service " + appName + " not found");
+      serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
+      return Response.status(Status.NOT_FOUND).entity(serviceStatus)
+          .build();
+    } catch (IOException | InterruptedException e) {
+      LOG.error("Get service failed: {}", e);
+      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
+    } catch (UndeclaredThrowableException e) {
+      LOG.error("Get service failed: {}", e);
+      return formatResponse(Status.INTERNAL_SERVER_ERROR,
+          e.getCause().getMessage());
+    }
+  }
+
+  @DELETE
+  @Path(SERVICE_PATH)
+  @Consumes({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
+  public Response deleteService(@Context HttpServletRequest request,
+      @PathParam(SERVICE_NAME) String appName) {
+    try {
+      if (appName == null) {
+        throw new IllegalArgumentException("Service name can not be null.");
+      }
+      UserGroupInformation ugi = getProxyUser(request);
+      LOG.info("DELETE: deleteService for appName = {} user = {}",
+          appName, ugi);
+      return stopService(appName, true, ugi);
+    } catch (AccessControlException e) {
+      return formatResponse(Status.FORBIDDEN, e.getMessage());
+    } catch (IllegalArgumentException e) {
+      return formatResponse(Status.BAD_REQUEST, e.getMessage());
+    } catch (UndeclaredThrowableException e) {
+      LOG.error("Fail to stop service: {}", e);
+      return formatResponse(Status.BAD_REQUEST,
+          e.getCause().getMessage());
+    } catch (YarnException | FileNotFoundException e) {
+      return formatResponse(Status.NOT_FOUND, e.getMessage());
+    } catch (Exception e) {
+      LOG.error("Fail to stop service: {}", e);
+      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
+    }
+  }
+
+  private Response stopService(String appName, boolean destroy,
+      final UserGroupInformation ugi) throws Exception {
+    int result = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
+      @Override
+      public Integer run() throws Exception {
+        int result = 0;
+        ServiceClient sc = getServiceClient();
+        sc.init(YARN_CONFIG);
+        sc.start();
+        Exception stopException = null;
+        try {
+          result = sc.actionStop(appName, destroy);
+          if (result == EXIT_SUCCESS) {
+            LOG.info("Successfully stopped service {}", appName);
+          }
+        } catch (Exception e) {
+          LOG.info("Got exception stopping service", e);
+          stopException = e;
+        }
+        if (destroy) {
+          result = sc.actionDestroy(appName);
+          if (result == EXIT_SUCCESS) {
+            LOG.info("Successfully deleted service {}", appName);
+          }
+        } else {
+          if (stopException != null) {
+            throw stopException;
+          }
+        }
+        sc.close();
+        return result;
+      }
+    });
+    ServiceStatus serviceStatus = new ServiceStatus();
+    if (destroy) {
+      if (result == EXIT_SUCCESS) {
+        serviceStatus.setDiagnostics("Successfully destroyed service " +
+            appName);
+      } else {
+        if (result == EXIT_NOT_FOUND) {
+          serviceStatus
+              .setDiagnostics("Service " + appName + " doesn't exist");
+          return formatResponse(Status.BAD_REQUEST, serviceStatus);
+        } else {
+          serviceStatus.setDiagnostics(
+              "Error cleaning up the registry for service " + appName);
+          return formatResponse(Status.INTERNAL_SERVER_ERROR, serviceStatus);
+        }
+      }
+    } else {
+      if (result == EXIT_COMMAND_ARGUMENT_ERROR) {
+        serviceStatus
+            .setDiagnostics("Service " + appName + " is already stopped");
+        return formatResponse(Status.BAD_REQUEST, serviceStatus);
+      } else {
+        serviceStatus.setDiagnostics("Successfully stopped service " + appName);
+      }
+    }
+    return formatResponse(Status.OK, serviceStatus);
+  }
+
+  @PUT
+  @Path(COMPONENTS_PATH)
+  @Consumes({MediaType.APPLICATION_JSON})
+  @Produces({RestApiConstants.MEDIA_TYPE_JSON_UTF8, MediaType.TEXT_PLAIN})
+  public Response updateComponents(@Context HttpServletRequest request,
+      @PathParam(SERVICE_NAME) String serviceName,
+      List<Component> requestComponents) {
+
+    try {
+      if (requestComponents == null || requestComponents.isEmpty()) {
+        throw new YarnException("No components provided.");
+      }
+      UserGroupInformation ugi = getProxyUser(request);
+      Set<String> compNamesToUpgrade = new HashSet<>();
+      requestComponents.forEach(reqComp -> {
+        if (reqComp.getState() != null &&
+            reqComp.getState().equals(ComponentState.UPGRADING)) {
+          compNamesToUpgrade.add(reqComp.getName());
+        }
+      });
+      LOG.info("PUT: upgrade components {} for service {} " +
+          "user = {}", compNamesToUpgrade, serviceName, ugi);
+      return processComponentsUpgrade(ugi, serviceName, compNamesToUpgrade);
+    } catch (AccessControlException e) {
+      return formatResponse(Response.Status.FORBIDDEN, e.getMessage());
+    } catch (YarnException e) {
+      return formatResponse(Response.Status.BAD_REQUEST, e.getMessage());
+    } catch (IOException | InterruptedException e) {
+      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
+          e.getMessage());
+    } catch (UndeclaredThrowableException e) {
+      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
+          e.getCause().getMessage());
+    }
+  }
+
+  @PUT
+  @Path(COMPONENT_PATH)
+  @Consumes({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8",
+              MediaType.TEXT_PLAIN  })
+  public Response updateComponent(@Context HttpServletRequest request,
+      @PathParam(SERVICE_NAME) String appName,
+      @PathParam(COMPONENT_NAME) String componentName, Component component) {
+
+    try {
+      if (component == null) {
+        throw new YarnException("No component data provided");
+      }
+      if (component.getName() != null
+          && !component.getName().equals(componentName)) {
+        String msg = "Component name in the request object ("
+            + component.getName() + ") does not match that in the URI path ("
+            + componentName + ")";
+        throw new YarnException(msg);
+      }
+      UserGroupInformation ugi = getProxyUser(request);
+      if (component.getState() != null &&
+          component.getState().equals(ComponentState.UPGRADING)) {
+        LOG.info("PUT: upgrade component {} for service {} " +
+            "user = {}", component.getName(), appName, ugi);
+        return processComponentsUpgrade(ugi, appName,
+            Sets.newHashSet(componentName));
+      }
+
+      if (component.getNumberOfContainers() == null) {
+        throw new YarnException("No container count provided");
+      }
+      if (component.getNumberOfContainers() < 0) {
+        String message = "Invalid number of containers specified "
+            + component.getNumberOfContainers();
+        throw new YarnException(message);
+      }
+      Map<String, Long> original = ugi
+          .doAs(new PrivilegedExceptionAction<Map<String, Long>>() {
+            @Override
+            public Map<String, Long> run() throws YarnException, IOException {
+              ServiceClient sc = getServiceClient();
+              sc.init(YARN_CONFIG);
+              sc.start();
+              Map<String, Long> original = sc.flexByRestService(appName,
+                  Collections.singletonMap(componentName,
+                      component.getNumberOfContainers()));
+              sc.close();
+              return original;
+            }
+          });
+      ServiceStatus status = new ServiceStatus();
+      status.setDiagnostics(
+          "Updating component (" + componentName + ") size from " + original
+              .get(componentName) + " to " + component.getNumberOfContainers());
+      return formatResponse(Status.OK, status);
+    } catch (AccessControlException e) {
+      return formatResponse(Status.FORBIDDEN, e.getMessage());
+    } catch (YarnException e) {
+      return formatResponse(Status.BAD_REQUEST, e.getMessage());
+    } catch (IOException | InterruptedException e) {
+      return formatResponse(Status.INTERNAL_SERVER_ERROR,
+          e.getMessage());
+    } catch (UndeclaredThrowableException e) {
+      return formatResponse(Status.INTERNAL_SERVER_ERROR,
+          e.getCause().getMessage());
+    }
+  }
+
+  @PUT
+  @Path(SERVICE_PATH)
+  @Consumes({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
+  public Response updateService(@Context HttpServletRequest request,
+      @PathParam(SERVICE_NAME) String appName,
+      Service updateServiceData) {
+    try {
+      UserGroupInformation ugi = getProxyUser(request);
+      LOG.info("PUT: updateService for app = {} with data = {} user = {}",
+          appName, updateServiceData, ugi);
+      // Ignore the app name provided in updateServiceData and always use
+      // appName path param
+      updateServiceData.setName(appName);
+
+      if (updateServiceData.getState() != null
+          && updateServiceData.getState() == ServiceState.FLEX) {
+        return flexService(updateServiceData, ugi);
+      }
+      // For STOP the app should be running. If already stopped then this
+      // operation will be a no-op. For START it should be in stopped state.
+      // If already running then this operation will be a no-op.
+      if (updateServiceData.getState() != null
+          && updateServiceData.getState() == ServiceState.STOPPED) {
+        return stopService(appName, false, ugi);
+      }
+
+      // If a START is requested
+      if (updateServiceData.getState() != null
+          && updateServiceData.getState() == ServiceState.STARTED) {
+        return startService(appName, ugi);
+      }
+
+      // If an UPGRADE is requested
+      if (updateServiceData.getState() != null && (
+          updateServiceData.getState() == ServiceState.UPGRADING ||
+              updateServiceData.getState() ==
+                  ServiceState.UPGRADING_AUTO_FINALIZE)) {
+        return upgradeService(updateServiceData, ugi);
+      }
+
+      // If new lifetime value specified then update it
+      if (updateServiceData.getLifetime() != null
+          && updateServiceData.getLifetime() > 0) {
+        return updateLifetime(appName, updateServiceData, ugi);
+      }
+    } catch (UndeclaredThrowableException e) {
+      return formatResponse(Status.BAD_REQUEST,
+          e.getCause().getMessage());
+    } catch (AccessControlException e) {
+      return formatResponse(Status.FORBIDDEN, e.getMessage());
+    } catch (FileNotFoundException e) {
+      String message = "Application is not found app: " + appName;
+      LOG.error(message, e);
+      return formatResponse(Status.NOT_FOUND, e.getMessage());
+    } catch (YarnException e) {
+      String message = "Service is not found in hdfs: " + appName;
+      LOG.error(message, e);
+      return formatResponse(Status.NOT_FOUND, e.getMessage());
+    } catch (Exception e) {
+      String message = "Error while performing operation for app: " + appName;
+      LOG.error(message, e);
+      return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
+    }
+
+    // If nothing happens consider it a no-op
+    return Response.status(Status.NO_CONTENT).build();
+  }
+
+  @PUT
+  @Path(COMP_INSTANCE_LONG_PATH)
+  @Consumes({MediaType.APPLICATION_JSON})
+  @Produces({RestApiConstants.MEDIA_TYPE_JSON_UTF8, MediaType.TEXT_PLAIN})
+  public Response updateComponentInstance(@Context HttpServletRequest request,
+      @PathParam(SERVICE_NAME) String serviceName,
+      @PathParam(COMPONENT_NAME) String componentName,
+      @PathParam(COMP_INSTANCE_NAME) String compInstanceName,
+      Container reqContainer) {
+
+    try {
+      UserGroupInformation ugi = getProxyUser(request);
+      LOG.info("PUT: update component instance {} for component = {}" +
+              " service = {} user = {}", compInstanceName, componentName,
+          serviceName, ugi);
+      if (reqContainer == null) {
+        throw new YarnException("No container data provided.");
+      }
+      Service service = getServiceFromClient(ugi, serviceName);
+      Component component = service.getComponent(componentName);
+      if (component == null) {
+        throw new YarnException(String.format(
+            "The component name in the URI path (%s) is invalid.",
+            componentName));
+      }
+
+      Container liveContainer = component.getComponentInstance(
+          compInstanceName);
+      if (liveContainer == null) {
+        throw new YarnException(String.format(
+            "The component (%s) does not have a component instance (%s).",
+            componentName, compInstanceName));
+      }
+
+      if (reqContainer.getState() != null
+          && reqContainer.getState().equals(ContainerState.UPGRADING)) {
+        return processContainersUpgrade(ugi, service,
+            Lists.newArrayList(liveContainer));
+      }
+    } catch (AccessControlException e) {
+      return formatResponse(Response.Status.FORBIDDEN, e.getMessage());
+    } catch (YarnException e) {
+      return formatResponse(Response.Status.BAD_REQUEST, e.getMessage());
+    } catch (IOException | InterruptedException e) {
+      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
+          e.getMessage());
+    } catch (UndeclaredThrowableException e) {
+      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
+          e.getCause().getMessage());
+    }
+    return Response.status(Status.NO_CONTENT).build();
+  }
+
+  @PUT
+  @Path(COMP_INSTANCES_PATH)
+  @Consumes({MediaType.APPLICATION_JSON})
+  @Produces({RestApiConstants.MEDIA_TYPE_JSON_UTF8, MediaType.TEXT_PLAIN})
+  public Response updateComponentInstances(@Context HttpServletRequest request,
+      @PathParam(SERVICE_NAME) String serviceName,
+      List<Container> requestContainers) {
+
+    try {
+      if (requestContainers == null || requestContainers.isEmpty()) {
+        throw new YarnException("No containers provided.");
+      }
+      UserGroupInformation ugi = getProxyUser(request);
+      List<String> toUpgrade = new ArrayList<>();
+      for (Container reqContainer : requestContainers) {
+        if (reqContainer.getState() != null &&
+            reqContainer.getState().equals(ContainerState.UPGRADING)) {
+          toUpgrade.add(reqContainer.getComponentInstanceName());
+        }
+      }
+
+      if (!toUpgrade.isEmpty()) {
+        Service service = getServiceFromClient(ugi, serviceName);
+        LOG.info("PUT: upgrade component instances {} for service = {} " +
+            "user = {}", toUpgrade, serviceName, ugi);
+        List<Container> liveContainers = ServiceApiUtil
+            .getLiveContainers(service, toUpgrade);
+
+        return processContainersUpgrade(ugi, service, liveContainers);
+      }
+    } catch (AccessControlException e) {
+      return formatResponse(Response.Status.FORBIDDEN, e.getMessage());
+    } catch (YarnException e) {
+      return formatResponse(Response.Status.BAD_REQUEST, e.getMessage());
+    } catch (IOException | InterruptedException e) {
+      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
+          e.getMessage());
+    } catch (UndeclaredThrowableException e) {
+      return formatResponse(Response.Status.INTERNAL_SERVER_ERROR,
+          e.getCause().getMessage());
+    }
+    return Response.status(Status.NO_CONTENT).build();
+  }
+
+  private Response flexService(Service service, UserGroupInformation ugi)
+      throws IOException, InterruptedException {
+    String appName = service.getName();
+    Response response = Response.status(Status.BAD_REQUEST).build();
+    Map<String, String> componentCountStrings = new HashMap<String, String>();
+    for (Component c : service.getComponents()) {
+      componentCountStrings.put(c.getName(),
+          c.getNumberOfContainers().toString());
+    }
+    Integer result = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
+
+      @Override
+      public Integer run() throws YarnException, IOException {
+        int result = 0;
+        ServiceClient sc = getServiceClient();
+        sc.init(YARN_CONFIG);
+        sc.start();
+        result = sc
+            .actionFlex(appName, componentCountStrings);
+        sc.close();
+        return Integer.valueOf(result);
+      }
+    });
+    if (result == EXIT_SUCCESS) {
+      String message = "Service " + appName + " is successfully flexed.";
+      LOG.info(message);
+      ServiceStatus status = new ServiceStatus();
+      status.setDiagnostics(message);
+      status.setState(ServiceState.ACCEPTED);
+      response = formatResponse(Status.ACCEPTED, status);
+    }
+    return response;
+  }
+
+  private Response updateLifetime(String appName, Service updateAppData,
+      final UserGroupInformation ugi) throws IOException,
+      InterruptedException {
+    String newLifeTime = ugi.doAs(new PrivilegedExceptionAction<String>() {
+      @Override
+      public String run() throws YarnException, IOException {
+        ServiceClient sc = getServiceClient();
+        sc.init(YARN_CONFIG);
+        sc.start();
+        String newLifeTime = sc.updateLifetime(appName,
+            updateAppData.getLifetime());
+        sc.close();
+        return newLifeTime;
+      }
+    });
+    ServiceStatus status = new ServiceStatus();
+    status.setDiagnostics(
+        "Service (" + appName + ")'s lifeTime is updated to " + newLifeTime
+            + ", " + updateAppData.getLifetime() + " seconds remaining");
+    return formatResponse(Status.OK, status);
+  }
+
+  private Response startService(String appName,
+      final UserGroupInformation ugi) throws IOException,
+      InterruptedException {
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws YarnException, IOException {
+        ServiceClient sc = getServiceClient();
+        sc.init(YARN_CONFIG);
+        sc.start();
+        sc.actionStart(appName);
+        sc.close();
+        return null;
+      }
+    });
+    LOG.info("Successfully started service " + appName);
+    ServiceStatus status = new ServiceStatus();
+    status.setDiagnostics("Service " + appName + " is successfully started.");
+    status.setState(ServiceState.ACCEPTED);
+    return formatResponse(Status.OK, status);
+  }
+
+  private Response upgradeService(Service service,
+      final UserGroupInformation ugi) throws IOException, InterruptedException {
+    ServiceStatus status = new ServiceStatus();
+    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
+      ServiceClient sc = getServiceClient();
+      sc.init(YARN_CONFIG);
+      sc.start();
+      sc.initiateUpgrade(service);
+      sc.close();
+      return null;
+    });
+    LOG.info("Service {} version {} upgrade initialized", service.getName(),
+        service.getVersion());
+    status.setDiagnostics("Service " + service.getName() +
+        " version " + service.getVersion() + " saved.");
+    status.setState(ServiceState.ACCEPTED);
+    return formatResponse(Status.ACCEPTED, status);
+  }
+
+  private Response processComponentsUpgrade(UserGroupInformation ugi,
+      String serviceName, Set<String> compNames) throws YarnException,
+      IOException, InterruptedException {
+    Service service = getServiceFromClient(ugi, serviceName);
+    if (service.getState() != ServiceState.UPGRADING) {
+      throw new YarnException(
+          String.format("The upgrade of service %s has not been initiated.",
+              service.getName()));
+    }
+    List<Container> containersToUpgrade = ServiceApiUtil
+        .validateAndResolveCompsUpgrade(service, compNames);
+    Integer result = invokeContainersUpgrade(ugi, service, containersToUpgrade);
+    if (result == EXIT_SUCCESS) {
+      ServiceStatus status = new ServiceStatus();
+      status.setDiagnostics(
+          "Upgrading components " + Joiner.on(',').join(compNames) + ".");
+      return formatResponse(Response.Status.ACCEPTED, status);
+    }
+    // If result is not a success, consider it a no-op
+    return Response.status(Response.Status.NO_CONTENT).build();
+  }
+
+  private Response processContainersUpgrade(UserGroupInformation ugi,
+      Service service, List<Container> containers) throws YarnException,
+      IOException, InterruptedException {
+
+    if (service.getState() != ServiceState.UPGRADING) {
+      throw new YarnException(
+          String.format("The upgrade of service %s has not been initiated.",
+              service.getName()));
+    }
+    ServiceApiUtil.validateInstancesUpgrade(containers);
+    Integer result = invokeContainersUpgrade(ugi, service, containers);
+    if (result == EXIT_SUCCESS) {
+      ServiceStatus status = new ServiceStatus();
+      status.setDiagnostics(
+          "Upgrading component instances " + containers.stream()
+              .map(Container::getId).collect(Collectors.joining(",")) + ".");
+      return formatResponse(Response.Status.ACCEPTED, status);
+    }
+    // If result is not a success, consider it a no-op
+    return Response.status(Response.Status.NO_CONTENT).build();
+  }
+
+  private int invokeContainersUpgrade(UserGroupInformation ugi,
+      Service service, List<Container> containers) throws IOException,
+      InterruptedException {
+    return ugi.doAs((PrivilegedExceptionAction<Integer>) () -> {
+      int result1;
+      ServiceClient sc = getServiceClient();
+      sc.init(YARN_CONFIG);
+      sc.start();
+      result1 = sc.actionUpgrade(service, containers);
+      sc.close();
+      return result1;
+    });
+  }
+
+  private Service getServiceFromClient(UserGroupInformation ugi,
+      String serviceName) throws IOException, InterruptedException {
+
+    return ugi.doAs((PrivilegedExceptionAction<Service>) () -> {
+      ServiceClient sc = getServiceClient();
+      sc.init(YARN_CONFIG);
+      sc.start();
+      Service app1 = sc.getStatus(serviceName);
+      sc.close();
+      return app1;
+    });
+  }
+
+  /**
+   * Used by unit tests to inject a mocked ServiceClient.
+   *
+   * @param mockServerClient - A mocked version of ServiceClient
+   */
+  public void setServiceClient(ServiceClient mockServerClient) {
+    serviceClientUnitTest = mockServerClient;
+    unitTest = true;
+  }
+
+  private ServiceClient getServiceClient() {
+    if (unitTest) {
+      return serviceClientUnitTest;
+    } else {
+      return new ServiceClient();
+    }
+  }
+
+  /**
+   * Configure impersonation callback.
+   *
+   * @param request - web request
+   * @return - configured UGI class for proxy callback
+   * @throws AccessControlException - if the user is not logged in.
+   */
+  private UserGroupInformation getProxyUser(HttpServletRequest request)
+      throws AccessControlException {
+    UserGroupInformation proxyUser;
+    UserGroupInformation ugi;
+    String remoteUser = request.getRemoteUser();
+    try {
+      if (UserGroupInformation.isSecurityEnabled()) {
+        proxyUser = UserGroupInformation.getLoginUser();
+        ugi = UserGroupInformation.createProxyUser(remoteUser, proxyUser);
+      } else {
+        ugi = UserGroupInformation.createRemoteUser(remoteUser);
+      }
+      return ugi;
+    } catch (IOException e) {
+      throw new AccessControlException(e.getCause());
+    }
+  }
+
+  /**
+   * Format HTTP response.
+   *
+   * @param status - HTTP Code
+   * @param message - Diagnostic message
+   * @return - HTTP response
+   */
+  private Response formatResponse(Status status, String message) {
+    ServiceStatus entity = new ServiceStatus();
+    entity.setDiagnostics(message);
+    return formatResponse(status, entity);
+  }
+
+  /**
+   * Format HTTP response.
+   *
+   * @param status - HTTP Code
+   * @param entity - ServiceStatus object
+   * @return - HTTP response
+   */
+  private Response formatResponse(Status status, ServiceStatus entity) {
+    return Response.status(status).entity(entity).build();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java
new file mode 100644
index 0000000..f4acd94
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.webapp;
+
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.eclipse.jetty.webapp.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*;
+
+/**
+ * This class launches the web service using Hadoop HttpServer2 (which uses
+ * an embedded Jetty container). It is the entry point of the API server;
+ * the Java command used to launch the app should invoke the main method.
+ */
+public class ApiServerWebApp extends AbstractService {
+  private static final Logger logger = LoggerFactory
+      .getLogger(ApiServerWebApp.class);
+  private static final String SEP = ";";
+
+  // REST API server for YARN native services
+  private HttpServer2 apiServer;
+  private InetSocketAddress bindAddress;
+
+  public static void main(String[] args) throws IOException {
+    ApiServerWebApp apiWebApp = new ApiServerWebApp();
+    try {
+      apiWebApp.init(new YarnConfiguration());
+      apiWebApp.serviceStart();
+    } catch (Exception e) {
+      logger.error("Got exception starting", e);
+      apiWebApp.close();
+    }
+  }
+
+  public ApiServerWebApp() {
+    super(ApiServerWebApp.class.getName());
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    bindAddress = getConfig().getSocketAddr(API_SERVER_ADDRESS,
+        DEFAULT_API_SERVER_ADDRESS, DEFAULT_API_SERVER_PORT);
+    logger.info("YARN API server running on " + bindAddress);
+    if (UserGroupInformation.isSecurityEnabled()) {
+      doSecureLogin(getConfig());
+    }
+    startWebApp();
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (apiServer != null) {
+      apiServer.stop();
+    }
+    super.serviceStop();
+  }
+
+  private void doSecureLogin(org.apache.hadoop.conf.Configuration conf)
+      throws IOException {
+    SecurityUtil.login(conf, YarnConfiguration.RM_KEYTAB,
+        YarnConfiguration.RM_PRINCIPAL, bindAddress.getHostName());
+    addFilters(conf);
+  }
+
+  private void addFilters(org.apache.hadoop.conf.Configuration conf) {
+    // Always load pseudo authentication filter to parse "user.name" in an URL
+    // to identify a HTTP request's user.
+    boolean hasHadoopAuthFilterInitializer = false;
+    String filterInitializerConfKey = "hadoop.http.filter.initializers";
+    Class<?>[] initializersClasses =
+        conf.getClasses(filterInitializerConfKey);
+    List<String> targets = new ArrayList<String>();
+    if (initializersClasses != null) {
+      for (Class<?> initializer : initializersClasses) {
+        if (initializer.getName().equals(
+            AuthenticationFilterInitializer.class.getName())) {
+          hasHadoopAuthFilterInitializer = true;
+          break;
+        }
+        targets.add(initializer.getName());
+      }
+    }
+    if (!hasHadoopAuthFilterInitializer) {
+      targets.add(AuthenticationFilterInitializer.class.getName());
+      conf.set(filterInitializerConfKey, StringUtils.join(",", targets));
+    }
+  }
+
+  private void startWebApp() throws IOException {
+    URI uri = URI.create("http://" + NetUtils.getHostPortString(bindAddress));
+
+    apiServer = new HttpServer2.Builder()
+        .setName("api-server")
+        .setConf(getConfig())
+        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
+        .setUsernameConfKey(RM_WEBAPP_SPNEGO_USER_NAME_KEY)
+        .setKeytabConfKey(RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
+        .addEndpoint(uri).build();
+
+    String apiPackages =
+        ApiServer.class.getPackage().getName() + SEP
+            + GenericExceptionHandler.class.getPackage().getName() + SEP
+            + YarnJacksonJaxbJsonProvider.class.getPackage().getName();
+    apiServer.addJerseyResourcePackage(apiPackages, "/*");
+
+    try {
+      logger.info("Service starting up. Logging start...");
+      apiServer.start();
+      logger.info("Server status = {}", apiServer.toString());
+      for (Configuration conf : apiServer.getWebAppContext()
+          .getConfigurations()) {
+        logger.info("Configurations = {}", conf);
+      }
+      logger.info("Context Path = {}", Collections.singletonList(
+          apiServer.getWebAppContext().getContextPath()));
+      logger.info("ResourceBase = {}", Collections.singletonList(
+          apiServer.getWebAppContext().getResourceBase()));
+      logger.info("War = {}", Collections
+          .singletonList(apiServer.getWebAppContext().getWar()));
+    } catch (Exception ex) {
+      logger.error("Hadoop HttpServer2 App **failed**", ex);
+      throw ex;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java
new file mode 100644
index 0000000..1bdf05a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.service.webapp contains classes to be used
+ * for YARN Services API.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.service.webapp;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
new file mode 100644
index 0000000..b7ad6c9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -0,0 +1,444 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+## Examples
+
+### Create a simple single-component service with most attribute values as defaults
+POST URL - http://localhost:8088/app/v1/services
+
+##### POST Request JSON
+```json
+{
+  "name": "hello-world",
+  "version": "1.0.0",
+  "description": "hello world example",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 2,
+        "artifact": {
+          "id": "nginx:latest",
+          "type": "DOCKER"
+        },
+        "launch_command": "./start_nginx.sh",
+        "resource": {
+          "cpus": 1,
+          "memory": "256"
+        }
+      }
+    ]
+}
+```
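+
+##### POST Response JSON
+A successful create returns 202 Accepted with a ServiceStatus body along these lines (illustrative only; the application ID and exact field set depend on the cluster):
+```json
+{
+  "diagnostics": "Application ID: application_1503963985568_0002",
+  "state": "ACCEPTED",
+  "uri": "/app/v1/services/hello-world"
+}
+```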
+
+##### GET Response JSON
+GET URL - http://localhost:8088/app/v1/services/hello-world
+
+Note that a lifetime value of -1 means unlimited lifetime.
+
+```json
+{
+    "name": "hello-world",
+    "version": "1.0.0",
+    "description": "hello world example",
+    "id": "application_1503963985568_0002",
+    "lifetime": -1,
+    "state": "STABLE",
+    "components": [
+        {
+            "name": "hello",
+            "state": "STABLE",
+            "resource": {
+                "cpus": 1,
+                "memory": "256"
+            },
+            "configuration": {
+                "properties": {},
+                "env": {},
+                "files": []
+            },
+            "quicklinks": [],
+            "containers": [
+                {
+                    "id": "container_e03_1503963985568_0002_01_000002",
+                    "ip": "10.22.8.143",
+                    "hostname": "ctr-e03-1503963985568-0002-01-000002.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051512412,
+                    "bare_host": "host100.cloud.com",
+                    "component_instance_name": "hello-0"
+                },
+                {
+                    "id": "container_e03_1503963985568_0002_01_000003",
+                    "ip": "10.22.8.144",
+                    "hostname": "ctr-e03-1503963985568-0002-01-000003.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051536450,
+                    "bare_host": "host100.cloud.com",
+                    "component_instance_name": "hello-1"
+                }
+            ],
+            "launch_command": "./start_nginx.sh",
+            "number_of_containers": 1,
+            "run_privileged_container": false
+        }
+    ],
+    "configuration": {
+        "properties": {},
+        "env": {},
+        "files": []
+    },
+    "quicklinks": {}
+}
+
+```
+### Update to modify the lifetime of a service
+PUT URL - http://localhost:8088/app/v1/services/hello-world
+
+##### PUT Request JSON
+
+Note that irrespective of the current lifetime value, this update request sets the lifetime of the service to 3600 seconds (1 hour) from the time the request is submitted. Hence, whether a service has a remaining lifetime of 5 minutes and you would like to extend it to an hour, or it has a remaining lifetime of 5 hours and you would like to reduce it to an hour, you submit the same request below.
+
+```json
+{
+  "lifetime": 3600
+}
+```
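+
+A successful update returns 200 OK with a ServiceStatus body; a sketch of the diagnostics produced by the lifetime handler (the new expiry timestamp is cluster-dependent):
+```json
+{
+  "diagnostics": "Service (hello-world)'s lifeTime is updated to <new expiry>, 3600 seconds remaining"
+}
+```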
+### Stop a service
+PUT URL - http://localhost:8088/app/v1/services/hello-world
+
+##### PUT Request JSON
+```json
+{
+  "state": "STOPPED"
+}
+```
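+
+A successful stop returns 200 OK with diagnostics along these lines (illustrative):
+```json
+{
+  "diagnostics": "Successfully stopped service hello-world"
+}
+```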
+
+### Start a service
+PUT URL - http://localhost:8088/app/v1/services/hello-world
+
+##### PUT Request JSON
+```json
+{
+  "state": "STARTED"
+}
+```
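+
+A successful start returns 200 OK with an ACCEPTED service state; an illustrative response body:
+```json
+{
+  "diagnostics": "Service hello-world is successfully started.",
+  "state": "ACCEPTED"
+}
+```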
+
+### Update to flex up/down the number of containers (instances) of a component of a service
+PUT URL - http://localhost:8088/app/v1/services/hello-world/components/hello
+
+##### PUT Request JSON
+```json
+{
+  "number_of_containers": 3
+}
+```
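+
+A successful flex returns 200 OK; the diagnostics echo the old and new sizes, for example (assuming the component previously had 2 containers):
+```json
+{
+  "diagnostics": "Updating component (hello) size from 2 to 3"
+}
+```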
+
+Alternatively, you can specify the entire "components" section instead.
+
+PUT URL - http://localhost:8088/app/v1/services/hello-world
+##### PUT Request JSON
+```json
+{
+  "state": "FLEX",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 3
+      }
+    ]
+}
+```
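+
+This variant returns 202 Accepted on success, e.g. (illustrative):
+```json
+{
+  "diagnostics": "Service hello-world is successfully flexed.",
+  "state": "ACCEPTED"
+}
+```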
+
+### Destroy a service
+DELETE URL - http://localhost:8088/app/v1/services/hello-world
+
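+A successful destroy returns 200 OK, e.g. (illustrative):
+```json
+{
+  "diagnostics": "Successfully destroyed service hello-world"
+}
+```
+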
+***
+
+### Create a complicated service  - HBase
+POST URL - http://localhost:8088/app/v1/services
+
+##### POST Request JSON
+
+```json
+{
+  "name": "hbase-app-1",
+  "version": "1.0.0",
+  "description": "hbase service",
+  "lifetime": "3600",
+  "components": [
+    {
+      "name": "hbasemaster",
+      "number_of_containers": 1,
+      "artifact": {
+        "id": "hbase:latest",
+        "type": "DOCKER"
+      },
+      "launch_command": "/usr/hdp/current/hbase-master/bin/hbase master start",
+      "resource": {
+        "cpus": 1,
+        "memory": "2048"
+      },
+      "configuration": {
+        "env": {
+          "HBASE_LOG_DIR": "<LOG_DIR>"
+        },
+        "files": [
+          {
+            "type": "XML",
+            "dest_file": "/etc/hadoop/conf/core-site.xml",
+            "properties": {
+              "fs.defaultFS": "${CLUSTER_FS_URI}"
+            }
+          },
+          {
+            "type": "XML",
+            "dest_file": "/etc/hbase/conf/hbase-site.xml",
+            "properties": {
+              "hbase.cluster.distributed": "true",
+              "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
+              "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
+              "zookeeper.znode.parent": "${SERVICE_ZK_PATH}",
+              "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}",
+              "hbase.master.info.port": "16010"
+            }
+          }
+        ]
+      }
+    },
+    {
+      "name": "regionserver",
+      "number_of_containers": 3,
+      "artifact": {
+        "id": "hbase:latest",
+        "type": "DOCKER"
+      },
+      "launch_command": "/usr/hdp/current/hbase-regionserver/bin/hbase regionserver start",
+      "resource": {
+        "cpus": 1,
+        "memory": "2048"
+      },
+      "configuration": {
+        "env": {
+          "HBASE_LOG_DIR": "<LOG_DIR>"
+        },
+        "files": [
+          {
+            "type": "XML",
+            "dest_file": "/etc/hadoop/conf/core-site.xml",
+            "properties": {
+              "fs.defaultFS": "${CLUSTER_FS_URI}"
+            }
+          },
+          {
+            "type": "XML",
+            "dest_file": "/etc/hbase/conf/hbase-site.xml",
+            "properties": {
+              "hbase.cluster.distributed": "true",
+              "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
+              "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
+              "zookeeper.znode.parent": "${SERVICE_ZK_PATH}",
+              "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}",
+              "hbase.master.info.port": "16010",
+              "hbase.regionserver.hostname": "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}"
+            }
+          }
+        ]
+      }
+    }
+  ],
+  "quicklinks": {
+    "HBase Master Status UI": "http://hbasemaster0.${SERVICE_NAME}.${USER}.${DOMAIN}:16010/master-status",
+    "Proxied HBase Master Status UI": "http://app-proxy/${DOMAIN}/${USER}/${SERVICE_NAME}/hbasemaster/16010/"
+  }
+}
+```
+
+### Create a service requesting GPUs in addition to CPUs and RAM
+POST URL - http://localhost:8088/app/v1/services
+
+##### POST Request JSON
+```json
+{
+  "name": "hello-world",
+  "version": "1.0.0",
+  "description": "hello world example with GPUs",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 2,
+        "artifact": {
+          "id": "nginx:latest",
+          "type": "DOCKER"
+        },
+        "launch_command": "./start_nginx.sh",
+        "resource": {
+          "cpus": 1,
+          "memory": "256",
+          "additional" : {
+            "yarn.io/gpu" : {
+              "value" : 4,
+              "unit" : ""
+            }
+          }
+        }
+      }
+    ]
+}
+```
+
+### Create a service with a component requesting anti-affinity placement policy
+POST URL - http://localhost:8088/app/v1/services
+
+##### POST Request JSON
+```json
+{
+  "name": "hello-world",
+  "version": "1.0.0",
+  "description": "hello world example with anti-affinity",
+  "components" :
+    [
+      {
+        "name": "hello",
+        "number_of_containers": 3,
+        "artifact": {
+          "id": "nginx:latest",
+          "type": "DOCKER"
+        },
+        "launch_command": "./start_nginx.sh",
+        "resource": {
+          "cpus": 1,
+          "memory": "256"
+        },
+        "placement_policy": {
+          "constraints": [
+            {
+              "type": "ANTI_AFFINITY",
+              "scope": "NODE",
+              "node_attributes": {
+                "os": ["linux", "windows"],
+                "fault_domain": ["fd1", "fd2"]
+              },
+              "node_partitions": [
+                "gpu",
+                "fast-disk"
+              ],
+              "target_tags": [
+                "hello"
+              ]
+            }
+          ]
+        }
+      }
+    ]
+}
+```
+
+##### GET Response JSON
+GET URL - http://localhost:8088/app/v1/services/hello-world
+
+Note that for an anti-affinity component, no more than 1 container will be
+allocated on a specific node. In this example, 3 containers were requested by
+component "hello". All 3 were allocated because the cluster had 3 or more NMs;
+had the cluster had fewer than 3 NMs, fewer than 3 containers would have been
+allocated. When the number of allocated containers is less than the number of
+requested containers, the component and the service remain in a non-STABLE
+state.
+
+```json
+{
+    "name": "hello-world",
+    "version": "1.0.0",
+    "description": "hello world example with anti-affinity",
+    "id": "application_1503963985568_0003",
+    "lifetime": -1,
+    "state": "STABLE",
+    "components": [
+        {
+            "name": "hello",
+            "state": "STABLE",
+            "resource": {
+                "cpus": 1,
+                "memory": "256"
+            },
+            "placement_policy": {
+              "constraints": [
+                {
+                  "type": "ANTI_AFFINITY",
+                  "scope": "NODE",
+                  "node_attributes": {
+                    "os": ["linux", "windows"],
+                    "fault_domain": ["fd1", "fd2"]
+                  },
+                  "node_partitions": [
+                    "gpu",
+                    "fast-disk"
+                  ],
+                  "target_tags": [
+                    "hello"
+                  ]
+                }
+              ]
+            },
+            "configuration": {
+                "properties": {},
+                "env": {},
+                "files": []
+            },
+            "quicklinks": [],
+            "containers": [
+                {
+                    "id": "container_e03_1503963985568_0003_01_000002",
+                    "ip": "10.22.8.143",
+                    "hostname": "ctr-e03-1503963985568-0003-01-000002.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051512412,
+                    "bare_host": "host100.cloud.com",
+                    "component_instance_name": "hello-0"
+                },
+                {
+                    "id": "container_e03_1503963985568_0003_01_000003",
+                    "ip": "10.22.8.144",
+                    "hostname": "ctr-e03-1503963985568-0003-01-000003.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051536450,
+                    "bare_host": "host101.cloud.com",
+                    "component_instance_name": "hello-1"
+                },
+                {
+                    "id": "container_e03_1503963985568_0003_01_000004",
+                    "ip": "10.22.8.145",
+                    "hostname": "ctr-e03-1503963985568-0003-01-000004.example.site",
+                    "state": "READY",
+                    "launch_time": 1504051536450,
+                    "bare_host": "host102.cloud.com",
+                    "component_instance_name": "hello-2"
+                }
+            ],
+            "launch_command": "./start_nginx.sh",
+            "number_of_containers": 1,
+            "run_privileged_container": false
+        }
+    ],
+    "configuration": {
+        "properties": {},
+        "env": {},
+        "files": []
+    },
+    "quicklinks": {}
+}
+```
+




[42/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js b/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js
new file mode 100644
index 0000000..e836475
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/js/jquery.min.js
@@ -0,0 +1,5 @@
+/*! jQuery v1.12.4 | (c) jQuery Foundation | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m="1.12.4",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return e.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a){return n.each(this,a)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(thi
 s,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(n.isPlainObject(c)||(b=n.isArray(c)))?(b?(b=!1,f=a&&n.isArray(a)?a:[]):f=a&&n.isPlainObject(a)?a:{},g[d]=n.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray||function(a){return"array"===n.type(a)},isWindow:function(a){return null!=
 a&&a==a.window},isNumeric:function(a){var b=a&&a.toString();return!n.isArray(a)&&b-parseFloat(b)+1>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;try{if(a.constructor&&!k.call(a,"constructor")&&!k.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(!l.ownFirst)for(b in a)return k.call(a,b);for(b in a);return void 0===b||k.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?i[j.call(a)]||"object":typeof a},globalEval:function(b){b&&n.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){retur
 n null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(h)return h.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&&h.push(e);else for(g in a)e=b(a[g],g,c),null!=e&&h.push(e);return f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return"string"==typeof b&&(f=a[b],b=a,a=f),n.isFunction(a)?(c=e.call(arguments,2),d=function(){return a.apply(b||this,c.concat(e.call(arguments)))},d.guid=a.guid=a.guid||n.guid++,d):void 0},now:function(){return+new Date},support:l}),"fu
 nction"==typeof Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(a,b){i["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=!!a&&"length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+M+"))|)"+L+"*\\]",O=":("+M+")(?:\\
 ((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+N+")*)|.*)\\)|)",P=new RegExp(L+"+","g"),Q=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),R=new RegExp("^"+L+"*,"+L+"*"),S=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),T=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),U=new RegExp(O),V=new RegExp("^"+M+"$"),W={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M+"|[*])"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},X=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Z=/^[^{]+\{\s*\[native \w/,$=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,_=/[+~]/,aa=/'|\\/g,ba=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"
 ),ca=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},da=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(ea){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fa(a,b,d,e){var f,h,j,k,l,o,r,s,w=b&&b.ownerDocument,x=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==x&&9!==x&&11!==x)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==x&&(o=$.exec(a)))if(f=o[1]){if(9===x){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(w&&(j=w.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(o[2])return H.apply(d,b.getElementsByTagName(a)),d;if((f=o[3])&&c.getElementsByClassName&&b.getElementsByClassName)return H.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==x)w=b,s=a;else if("object
 "!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(aa,"\\$&"):b.setAttribute("id",k=u),r=g(a),h=r.length,l=V.test(k)?"#"+k:"[id='"+k+"']";while(h--)r[h]=l+" "+qa(r[h]);s=r.join(","),w=_.test(a)&&oa(b.parentNode)||b}if(s)try{return H.apply(d,w.querySelectorAll(s)),d}catch(y){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(Q,"$1"),b,d,e)}function ga(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ha(a){return a[u]=!0,a}function ia(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ja(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function ka(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function la(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type
 ===a}}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function na(a){return ha(function(b){return b=+b,ha(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function oa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=fa.support={},f=fa.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fa.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ia(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ia(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Z.test(n.getElementsByClassName),c.getById=ia
 (function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return"undefined"!=typeof b.getElementsByClassName&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=Z.test(n.querySelectorAll))&&(ia(function(a){o.a
 ppendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\r\\' msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ia(function(a){var b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Z.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ia(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",O)}),q=q.length&&
 new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Z.test(o.compareDocumentPosition),t=b||Z.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return ka(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]==
 =h[d])d++;return d?ka(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},fa.matches=function(a,b){return fa(a,null,null,b)},fa.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(T,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fa(b,n,null,[a]).length>0},fa.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fa.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fa.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fa.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e]
 ,1)}return k=null,a},e=fa.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fa.selectors={cacheLength:50,createPseudo:ha,match:W,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ba,ca),a[3]=(a[3]||a[4]||a[5]||"").replace(ba,ca),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fa.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fa.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return W.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&U.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.le
 ngth-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ba,ca).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fa.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(P," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",
 q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fa.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ha(function(a,c){var d,f=e(a
 ,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ha(function(a){var b=[],c=[],d=h(a.replace(Q,"$1"));return d[u]?ha(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ha(function(a){return function(b){return fa(a,b).length>0}}),contains:ha(function(a){return a=a.replace(ba,ca),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ha(function(a){return V.test(a||"")||fa.error("unsupported lang: "+a),a=a.replace(ba,ca).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus()
 )&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Y.test(a.nodeName)},input:function(a){return X.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:na(function(){return[0]}),last:na(function(a,b){return[b-1]}),eq:na(function(a,b,c){return[0>c?c+b:c]}),even:na(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:na(function(a,b){for(var
  c=1;b>c;c+=2)a.push(c);return a}),lt:na(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:na(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=la(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=ma(b);function pa(){}pa.prototype=d.filters=d.pseudos,d.setFilters=new pa,g=fa.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){c&&!(e=R.exec(h))||(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=S.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(Q," ")}),h=h.slice(c.length));for(g in d.filter)!(e=W[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fa.error(a):z(a,i).slice(0)};function qa(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function ra(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++
 ;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j,k=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(j=b[u]||(b[u]={}),i=j[b.uniqueID]||(j[b.uniqueID]={}),(h=i[d])&&h[0]===w&&h[1]===f)return k[2]=h[2];if(i[d]=k,k[2]=a(b,c,g))return!0}}}function sa(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ta(a,b,c){for(var d=0,e=b.length;e>d;d++)fa(a,b[d],c);return c}function ua(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function va(a,b,c,d,e,f){return d&&!d[u]&&(d=va(d)),e&&!e[u]&&(e=va(e,f)),ha(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ta(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ua(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ua(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a
 ){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ua(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function wa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ra(function(a){return a===b},h,!0),l=ra(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[ra(sa(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return va(i>1&&sa(m),i>1&&qa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(Q,"$1"),c,e>i&&wa(a.slice(i,e)),f>e&&wa(a=a.slice(e)),f>e&&qa(a))}m.push(c)}return sa(m)}function xa(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.l
 ength;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=F.call(i));u=ua(u)}H.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&fa.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ha(f):f}return h=fa.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xa(e,d)),f.selector=a}return f},i=fa.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ba,ca),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=W.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relati
 ve[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ba,ca),_.test(j[0].type)&&oa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qa(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,!b||_.test(a)&&oa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ia(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ia(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ja("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ia(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ja("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ia(function(a){return null==a.getAttribute("disabled")})||ja(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNo
 de(b))&&d.specified?d.value:null}),fa}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.uniqueSort=n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},v=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},w=n.expr.match.needsContext,x=/^<([\w-]+)\s*\/?>(?:<\/\1>|)$/,y=/^.[^:#\[\.,]*$/;function z(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(y.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return n.inArray(a,b)>-1!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find
 :function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;e>b;b++)if(n.contains(d[b],this))return!0}));for(b=0;e>b;b++)n.find(a,d[b],c);return c=this.pushStack(e>1?n.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(z(this,a||[],!1))},not:function(a){return this.pushStack(z(this,a||[],!0))},is:function(a){return!!z(this,"string"==typeof a&&w.test(a)?n(a):a||[],!1).length}});var A,B=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=n.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||A,"string"==typeof a){if(e="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:B.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),x.test(e[1])&&n.isPlainObject(b))for(e in b)n.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return thi
 s}if(f=d.getElementById(e[2]),f&&f.parentNode){if(f.id!==e[2])return A.find(a);this.length=1,this[0]=f}return this.context=d,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?"undefined"!=typeof c.ready?c.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};C.prototype=n.fn,A=n(d);var D=/^(?:parents|prev(?:Until|All))/,E={children:!0,contents:!0,next:!0,prev:!0};n.fn.extend({has:function(a){var b,c=n(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(n.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=w.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.uniqueSort(f):f)},index:function(a){return a?"string"==typeof a?n.inArray(this[0],n(a)):n.inArray(a
 .jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.uniqueSort(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function F(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return u(a,"parentNode")},parentsUntil:function(a,b,c){return u(a,"parentNode",c)},next:function(a){return F(a,"nextSibling")},prev:function(a){return F(a,"previousSibling")},nextAll:function(a){return u(a,"nextSibling")},prevAll:function(a){return u(a,"previousSibling")},nextUntil:function(a,b,c){return u(a,"nextSibling",c)},prevUntil:function(a,b,c){return u(a,"previousSibling",c)},siblings:function(a){return v((a.parentNode||{}).firstChild,a)},children:function(a){return v(a.firstChild)},contents:function(a){return n.nodeName(a,"iframe")?a.contentDocument||a.contentWindow
 .document:n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(E[a]||(e=n.uniqueSort(e)),D.test(a)&&(e=e.reverse())),this.pushStack(e)}});var G=/\S+/g;function H(a){var b={};return n.each(a.match(G)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?H(a):n.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h<f.length)f[h].apply(c[0],c[1])===!1&&a.stopOnFalse&&(h=f.length,c=!1)}a.memory||(c=!1),b=!1,e&&(f=c?[]:"")},j={add:function(){return f&&(c&&!b&&(h=f.length-1,g.push(c)),function d(b){n.each(b,function(b,c){n.isFunction(c)?a.unique&&j.has(c)||f.push(c):c&&c.length&&"string"!==n.type(c)&&d(c)})}(arguments),c&&!b&&i()),this},remove:function(){return n.each(arguments,function(a,b){var c;while((c=n.inArray(b,f,c))>-1)f.splice(c,1),h>=c&&h--}),this},has:function(a){return a?n.inArray
 (a,f)>-1:f.length>0},empty:function(){return f&&(f=[]),this},disable:function(){return e=g=[],f=c="",this},disabled:function(){return!f},lock:function(){return e=!0,c||j.disable(),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().progress(c.notify).done(c.resolve).fail(c.reject):c[f[0]+"With"](this===d?c.promise()
 :this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=e.call(arguments),d=c.length,f=1!==d||a&&n.isFunction(a.promise)?d:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(d){b[a]=this,c[a]=arguments.length>1?e.call(arguments):d,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(d>1)for(i=new Array(d),j=new Array(d),k=new Array(d);d>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().progress(h(b,j,i)).done(h(b,k,c)).fail(g.reject):--f;return f||g.resolveWith(k,c),g.promise()}});var I;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)}
 ,ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(I.resolveWith(d,[n]),n.fn.triggerHandler&&(n(d).triggerHandler("ready"),n(d).off("ready"))))}});function J(){d.addEventListener?(d.removeEventListener("DOMContentLoaded",K),a.removeEventListener("load",K)):(d.detachEvent("onreadystatechange",K),a.detachEvent("onload",K))}function K(){(d.addEventListener||"load"===a.event.type||"complete"===d.readyState)&&(J(),n.ready())}n.ready.promise=function(b){if(!I)if(I=n.Deferred(),"complete"===d.readyState||"loading"!==d.readyState&&!d.documentElement.doScroll)a.setTimeout(n.ready);else if(d.addEventListener)d.addEventListener("DOMContentLoaded",K),a.addEventListener("load",K);else{d.attachEvent("onreadystatechange",K),a.attachEvent("onload",K);var c=!1;try{c=null==a.frameElement&&d.documentElement}catch(e){}c&&c.doScroll&&!function f(){if(!n.isReady){try{c.doScroll("left")}catch(b){return a.setTimeout(f,50)}J(),n.ready()}}()}return I.promise(b)},n.re
 ady.promise();var L;for(L in n(l))break;l.ownFirst="0"===L,l.inlineBlockNeedsLayout=!1,n(function(){var a,b,c,e;c=d.getElementsByTagName("body")[0],c&&c.style&&(b=d.createElement("div"),e=d.createElement("div"),e.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(e).appendChild(b),"undefined"!=typeof b.style.zoom&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",l.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(e))}),function(){var a=d.createElement("div");l.deleteExpando=!0;try{delete a.test}catch(b){l.deleteExpando=!1}a=null}();var M=function(a){var b=n.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b},N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(O,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"fa
 lse"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?n.parseJSON(c):c}catch(e){}n.data(a,b,c)}else c=void 0;
+}return c}function Q(a){var b;for(b in a)if(("data"!==b||!n.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function R(a,b,d,e){if(M(a)){var f,g,h=n.expando,i=a.nodeType,j=i?n.cache:a,k=i?a[h]:a[h]&&h;if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||n.guid++:h),j[k]||(j[k]=i?{}:{toJSON:n.noop}),"object"!=typeof b&&"function"!=typeof b||(e?j[k]=n.extend(j[k],b):j[k].data=n.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[n.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[n.camelCase(b)])):f=g,f}}function S(a,b,c){if(M(a)){var d,e,f=a.nodeType,g=f?n.cache:a,h=f?a[n.expando]:n.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){n.isArray(b)?b=b.concat(n.map(b,n.camelCase)):b in d?b=[b]:(b=n.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!Q(d):!n.isEmptyObject(d))return}(c||(delete g[h].data,Q(g[h])))&&(f?n.cleanData([a],!0):l.deleteExpando||g!=g.window?delete g[h]:g[h]=void 0)}}}
 n.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?n.cache[a[n.expando]]:a[n.expando],!!a&&!Q(a)},data:function(a,b,c){return R(a,b,c)},removeData:function(a,b){return S(a,b)},_data:function(a,b,c){return R(a,b,c,!0)},_removeData:function(a,b){return S(a,b,!0)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=n.data(f),1===f.nodeType&&!n._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),P(f,d,e[d])));n._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){n.data(this,a)}):arguments.length>1?this.each(function(){n.data(this,a,b)}):f?P(f,a,n.data(f,a)):void 0},removeData:function(a){return this.each(function(){n.removeData(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=n._data(a,b),c&&(!d||n.isArray(c)?d=n.
 _data(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return n._data(a,c)||n._data(a,c,{empty:n.Callbacks("once memory").add(function(){n._removeData(a,b+"queue"),n._removeData(a,c)})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?n.queue(this[0],a):void 0===b?this:this.each(function(){var c=n.queue(this,a,b);n._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&n.dequeue(this,a)})},dequeue:function(a){return this.each(function(){n.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=n.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,
 a=void 0),a=a||"fx";while(g--)c=n._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}}),function(){var a;l.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,e;return c=d.getElementsByTagName("body")[0],c&&c.style?(b=d.createElement("div"),e=d.createElement("div"),e.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(e).appendChild(b),"undefined"!=typeof b.style.zoom&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(d.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(e),a):void 0}}();var T=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,U=new RegExp("^(?:([+-])=|)("+T+")([a-z%]*)$","i"),V=["Top","Right","Bottom","Left"],W=function(a,b){return a=b||a,"none"===n.css(a,"display")||!n.contains(a.ownerDocument,a)};function X(a,b,c,d){var e,f=1,g=20,h=d?functi
 on(){return d.cur()}:function(){return n.css(a,b,"")},i=h(),j=c&&c[3]||(n.cssNumber[b]?"":"px"),k=(n.cssNumber[b]||"px"!==j&&+i)&&U.exec(n.css(a,b));if(k&&k[3]!==j){j=j||k[3],c=c||[],k=+i||1;do f=f||".5",k/=f,n.style(a,b,k+j);while(f!==(f=h()/i)&&1!==f&&--g)}return c&&(k=+k||+i||0,e=c[1]?k+(c[1]+1)*c[2]:+c[2],d&&(d.unit=j,d.start=k,d.end=e)),e}var Y=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)Y(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},Z=/^(?:checkbox|radio)$/i,$=/<([\w:-]+)/,_=/^$|\/(?:java|ecma)script/i,aa=/^\s+/,ba="abbr|article|aside|audio|bdi|canvas|data|datalist|details|dialog|figcaption|figure|footer|header|hgroup|main|mark|meter|nav|output|picture|progress|section|summary|template|time|video";function ca(a){var b=ba.split("|"),c=a.cre
 ateDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}!function(){var a=d.createElement("div"),b=d.createDocumentFragment(),c=d.createElement("input");a.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",l.leadingWhitespace=3===a.firstChild.nodeType,l.tbody=!a.getElementsByTagName("tbody").length,l.htmlSerialize=!!a.getElementsByTagName("link").length,l.html5Clone="<:nav></:nav>"!==d.createElement("nav").cloneNode(!0).outerHTML,c.type="checkbox",c.checked=!0,b.appendChild(c),l.appendChecked=c.checked,a.innerHTML="<textarea>x</textarea>",l.noCloneChecked=!!a.cloneNode(!0).lastChild.defaultValue,b.appendChild(a),c=d.createElement("input"),c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),a.appendChild(c),l.checkClone=a.cloneNode(!0).cloneNode(!0).lastChild.checked,l.noCloneEvent=!!a.addEventListener,a[n.expando]=1,l.attributes=!a.getAttribute(n.expando)}();var da={option:[1,"<se
 lect multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:l.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]};da.optgroup=da.option,da.tbody=da.tfoot=da.colgroup=da.caption=da.thead,da.th=da.td;function ea(a,b){var c,d,e=0,f="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||n.nodeName(d,b)?f.push(d):n.merge(f,ea(d,b));return void 0===b||b&&n.nodeName(a,b)?n.merge([a],f):f}function fa(a,b){for(var c,d=0;null!=(c=a[d]);d++)n._data(c,"globalEval",!b||n._data(b[d],"globalEval"))}var ga=/<|&#?\w+;/,ha=/<tbody/i;function ia(a){Z.test(a.type)&&(a.defaultChecked=a.check
 ed)}function ja(a,b,c,d,e){for(var f,g,h,i,j,k,m,o=a.length,p=ca(b),q=[],r=0;o>r;r++)if(g=a[r],g||0===g)if("object"===n.type(g))n.merge(q,g.nodeType?[g]:g);else if(ga.test(g)){i=i||p.appendChild(b.createElement("div")),j=($.exec(g)||["",""])[1].toLowerCase(),m=da[j]||da._default,i.innerHTML=m[1]+n.htmlPrefilter(g)+m[2],f=m[0];while(f--)i=i.lastChild;if(!l.leadingWhitespace&&aa.test(g)&&q.push(b.createTextNode(aa.exec(g)[0])),!l.tbody){g="table"!==j||ha.test(g)?"<table>"!==m[1]||ha.test(g)?0:i:i.firstChild,f=g&&g.childNodes.length;while(f--)n.nodeName(k=g.childNodes[f],"tbody")&&!k.childNodes.length&&g.removeChild(k)}n.merge(q,i.childNodes),i.textContent="";while(i.firstChild)i.removeChild(i.firstChild);i=p.lastChild}else q.push(b.createTextNode(g));i&&p.removeChild(i),l.appendChecked||n.grep(ea(q,"input"),ia),r=0;while(g=q[r++])if(d&&n.inArray(g,d)>-1)e&&e.push(g);else if(h=n.contains(g.ownerDocument,g),i=ea(p.appendChild(g),"script"),h&&fa(i),c){f=0;while(g=i[f++])_.test(g.type||""
 )&&c.push(g)}return i=null,p}!function(){var b,c,e=d.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(l[b]=c in a)||(e.setAttribute(c,"t"),l[b]=e.attributes[c].expando===!1);e=null}();var ka=/^(?:input|select|textarea)$/i,la=/^key/,ma=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,na=/^(?:focusinfocus|focusoutblur)$/,oa=/^([^.]*)(?:\.(.+)|)/;function pa(){return!0}function qa(){return!1}function ra(){try{return d.activeElement}catch(a){}}function sa(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)sa(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=qa;else if(!e)return a;return 1===f&&(g=e,e=function(a){return n().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=n.guid++)),a.each(function(){n.event.add(this,b,e,d,c)})}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n._data(a);if(r){c.handler&&(i=c,c=i.
 handler,e=i.selector),c.guid||(c.guid=n.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return"undefined"==typeof n||a&&n.event.triggered===a.type?void 0:n.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(G)||[""],h=b.length;while(h--)f=oa.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=n.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=n.event.special[o]||{},l=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},i),(m=g[o])||(m=g[o]=[],m.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,l):m.push(l),n.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n.hasData(a)&&n._data(a);if(r&&(k=r.events)){b=(b||"").match(G)
 ||[""],j=b.length;while(j--)if(h=oa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=m.length;while(f--)g=m[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(m.splice(f,1),g.selector&&m.delegateCount--,l.remove&&l.remove.call(a,g));i&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(k)&&(delete r.handle,n._removeData(a,"events"))}},trigger:function(b,c,e,f){var g,h,i,j,l,m,o,p=[e||d],q=k.call(b,"type")?b.type:b,r=k.call(b,"namespace")?b.namespace.split("."):[];if(i=m=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!na.test(q+n.event.triggered)&&(q.indexOf(".")>-1&&(r=q.split("."),q=r.shift(),r.sort()),h=q.indexOf(":")<0&&"on"+q,b=b[n.expando]?b:new n.Event(q,"object"==ty
 peof b&&b),b.isTrigger=f?2:3,b.namespace=r.join("."),b.rnamespace=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=e),c=null==c?[b]:n.makeArray(c,[b]),l=n.event.special[q]||{},f||!l.trigger||l.trigger.apply(e,c)!==!1)){if(!f&&!l.noBubble&&!n.isWindow(e)){for(j=l.delegateType||q,na.test(j+q)||(i=i.parentNode);i;i=i.parentNode)p.push(i),m=i;m===(e.ownerDocument||d)&&p.push(m.defaultView||m.parentWindow||a)}o=0;while((i=p[o++])&&!b.isPropagationStopped())b.type=o>1?j:l.bindType||q,g=(n._data(i,"events")||{})[b.type]&&n._data(i,"handle"),g&&g.apply(i,c),g=h&&i[h],g&&g.apply&&M(i)&&(b.result=g.apply(i,c),b.result===!1&&b.preventDefault());if(b.type=q,!f&&!b.isDefaultPrevented()&&(!l._default||l._default.apply(p.pop(),c)===!1)&&M(e)&&h&&e[q]&&!n.isWindow(e)){m=e[h],m&&(e[h]=null),n.event.triggered=q;try{e[q]()}catch(s){}n.event.triggered=void 0,m&&(e[h]=m)}return b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,d,f,g,h=[]
 ,i=e.call(arguments),j=(n._data(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())a.rnamespace&&!a.rnamespace.test(g.namespace)||(a.handleObj=g,a.data=g.data,d=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&("click"!==a.type||isNaN(a.button)||a.button<1))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>-1:n.find(e,this,null,[i]).length),d[e]&&d
 .push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[n.expando])return a;var b,c,e,f=a.type,g=a,h=this.fixHooks[f];h||(this.fixHooks[f]=h=ma.test(f)?this.mouseHooks:la.test(f)?this.keyHooks:{}),e=h.props?this.props.concat(h.props):this.props,a=new n.Event(g),b=e.length;while(b--)c=e[b],a[c]=g[c];return a.target||(a.target=g.srcElement||d),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,h.filter?h.filter(a,g):a},props:"altKey bubbles cancelable ctrlKey currentTarget detail eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,e,f,g=b.button,h
 =b.fromElement;return null==a.pageX&&null!=b.clientX&&(e=a.target.ownerDocument||d,f=e.documentElement,c=e.body,a.pageX=b.clientX+(f&&f.scrollLeft||c&&c.scrollLeft||0)-(f&&f.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(f&&f.scrollTop||c&&c.scrollTop||0)-(f&&f.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&h&&(a.relatedTarget=h===a.target?b.toElement:h),a.which||void 0===g||(a.which=1&g?1:2&g?3:4&g?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==ra()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===ra()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return n.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return n.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c){var d=n.exten
 d(new n.Event,c,{type:a,isSimulated:!0});n.event.trigger(d,null,b),d.isDefaultPrevented()&&c.preventDefault()}},n.removeEvent=d.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c)}:function(a,b,c){var d="on"+b;a.detachEvent&&("undefined"==typeof a[d]&&(a[d]=null),a.detachEvent(d,c))},n.Event=function(a,b){return this instanceof n.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?pa:qa):this.type=a,b&&n.extend(this,b),this.timeStamp=a&&a.timeStamp||n.now(),void(this[n.expando]=!0)):new n.Event(a,b)},n.Event.prototype={constructor:n.Event,isDefaultPrevented:qa,isPropagationStopped:qa,isImmediatePropagationStopped:qa,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=pa,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=pa,a&&!this.isSimulated&&(a.
 stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=pa,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},n.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){n.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return e&&(e===d||n.contains(d,e))||(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),l.submit||(n.event.special.submit={setup:function(){return n.nodeName(this,"form")?!1:void n.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=n.nodeName(b,"input")||n.nodeName(b,"button")?n.prop(b,"form"):void 0;c&&!n._data(c,"submit")&&(n.event.add(c,"submit._submit",function(a){a._submitBubble=!0}),n._data(c,"submit",!0))})},postDispatch:function(a){a._submitBubble&&(delete a._submitBubb
 le,this.parentNode&&!a.isTrigger&&n.event.simulate("submit",this.parentNode,a))},teardown:function(){return n.nodeName(this,"form")?!1:void n.event.remove(this,"._submit")}}),l.change||(n.event.special.change={setup:function(){return ka.test(this.nodeName)?("checkbox"!==this.type&&"radio"!==this.type||(n.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._justChanged=!0)}),n.event.add(this,"click._change",function(a){this._justChanged&&!a.isTrigger&&(this._justChanged=!1),n.event.simulate("change",this,a)})),!1):void n.event.add(this,"beforeactivate._change",function(a){var b=a.target;ka.test(b.nodeName)&&!n._data(b,"change")&&(n.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||n.event.simulate("change",this.parentNode,a)}),n._data(b,"change",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,a
 rguments):void 0},teardown:function(){return n.event.remove(this,"._change"),!ka.test(this.nodeName)}}),l.focusin||n.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){n.event.simulate(b,a.target,n.event.fix(a))};n.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=n._data(d,b);e||d.addEventListener(a,c,!0),n._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=n._data(d,b)-1;e?n._data(d,b,e):(d.removeEventListener(a,c,!0),n._removeData(d,b))}}}),n.fn.extend({on:function(a,b,c,d){return sa(this,a,b,c,d)},one:function(a,b,c,d){return sa(this,a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,n(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return b!==!1&&"function"!=typeof b||(c=b,b=void 0),c===!1&&(c=qa),this.each(function(){n.event.remove(this,a,c,b)})},trigger:fun
 ction(a,b){return this.each(function(){n.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?n.event.trigger(a,b,c,!0):void 0}});var ta=/ jQuery\d+="(?:null|\d+)"/g,ua=new RegExp("<(?:"+ba+")[\\s/>]","i"),va=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:-]+)[^>]*)\/>/gi,wa=/<script|<style|<link/i,xa=/checked\s*(?:[^=]|=\s*.checked.)/i,ya=/^true\/(.*)/,za=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,Aa=ca(d),Ba=Aa.appendChild(d.createElement("div"));function Ca(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function Da(a){return a.type=(null!==n.find.attr(a,"type"))+"/"+a.type,a}function Ea(a){var b=ya.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function Fa(a,b){if(1===b.nodeType&&n.hasData(a)){var c,d,e,f=n._data(a),g=n._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].le
 ngth;e>d;d++)n.event.add(b,c,h[c][d])}g.data&&(g.data=n.extend({},g.data))}}function Ga(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!l.noCloneEvent&&b[n.expando]){e=n._data(b);for(d in e.events)n.removeEvent(b,d,e.handle);b.removeAttribute(n.expando)}"script"===c&&b.text!==a.text?(Da(b).text=a.text,Ea(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),l.html5Clone&&a.innerHTML&&!n.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&Z.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:"input"!==c&&"textarea"!==c||(b.defaultValue=a.defaultValue)}}function Ha(a,b,c,d){b=f.apply([],b);var e,g,h,i,j,k,m=0,o=a.length,p=o-1,q=b[0],r=n.isFunction(q);if(r||o>1&&"string"==typeof q&&!l.checkClone&&xa.test(q))return a.each(function(e){var f=a.eq(e);r&&(b[0]=q.call(this,e,f.html())),Ha(f,b,c,d)});if(o&&(k=ja(b,a[0].ownerDocument,!1,a,d),e=k.firstChild,1===k.childNod
 es.length&&(k=e),e||d)){for(i=n.map(ea(k,"script"),Da),h=i.length;o>m;m++)g=k,m!==p&&(g=n.clone(g,!0,!0),h&&n.merge(i,ea(g,"script"))),c.call(a[m],g,m);if(h)for(j=i[i.length-1].ownerDocument,n.map(i,Ea),m=0;h>m;m++)g=i[m],_.test(g.type||"")&&!n._data(g,"globalEval")&&n.contains(j,g)&&(g.src?n._evalUrl&&n._evalUrl(g.src):n.globalEval((g.text||g.textContent||g.innerHTML||"").replace(za,"")));k=e=null}return a}function Ia(a,b,c){for(var d,e=b?n.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||n.cleanData(ea(d)),d.parentNode&&(c&&n.contains(d.ownerDocument,d)&&fa(ea(d,"script")),d.parentNode.removeChild(d));return a}n.extend({htmlPrefilter:function(a){return a.replace(va,"<$1></$2>")},clone:function(a,b,c){var d,e,f,g,h,i=n.contains(a.ownerDocument,a);if(l.html5Clone||n.isXMLDoc(a)||!ua.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(Ba.innerHTML=a.outerHTML,Ba.removeChild(f=Ba.firstChild)),!(l.noCloneEvent&&l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(d=ea(f
 ),h=ea(a),g=0;null!=(e=h[g]);++g)d[g]&&Ga(e,d[g]);if(b)if(c)for(h=h||ea(a),d=d||ea(f),g=0;null!=(e=h[g]);g++)Fa(e,d[g]);else Fa(a,f);return d=ea(f,"script"),d.length>0&&fa(d,!i&&ea(a,"script")),d=h=e=null,f},cleanData:function(a,b){for(var d,e,f,g,h=0,i=n.expando,j=n.cache,k=l.attributes,m=n.event.special;null!=(d=a[h]);h++)if((b||M(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)m[e]?n.event.remove(d,e):n.removeEvent(d,e,g.handle);j[f]&&(delete j[f],k||"undefined"==typeof d.removeAttribute?d[i]=void 0:d.removeAttribute(i),c.push(f))}}}),n.fn.extend({domManip:Ha,detach:function(a){return Ia(this,a,!0)},remove:function(a){return Ia(this,a)},text:function(a){return Y(this,function(a){return void 0===a?n.text(this):this.empty().append((this[0]&&this[0].ownerDocument||d).createTextNode(a))},null,a,arguments.length)},append:function(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.appendChild(a)}})},prepend:func
 tion(){return Ha(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Ca(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return Ha(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&n.cleanData(ea(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&n.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return Y(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(ta,""):void 0;if("string"==typeof a&&!wa.test(a)&&(l.htmlSerialize||!ua.test(a))&&(l.leadingWhitespace||!aa.test(a))&&!da[($.exec(a)||["",""])[1].toLowerCase(
 )]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(ea(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return Ha(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(ea(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=0,e=[],f=n(a),h=f.length-1;h>=d;d++)c=d===h?this:this.clone(!0),n(f[d])[b](c),g.apply(e,c.get());return this.pushStack(e)}});var Ja,Ka={HTML:"block",BODY:"block"};function La(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],"display");return c.detach(),d}function Ma(a){var b=d,c=Ka[a];return c||(c=La(a,b),"none"!==c&&c||(Ja=(Ja||n("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Ja[0].contentWindow||Ja[0].contentDocument).document,b.write(
 ),b.close(),c=La(a,b),Ja.detach()),Ka[a]=c),c}var Na=/^margin/,Oa=new RegExp("^("+T+")(?!px)[a-z%]+$","i"),Pa=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e},Qa=d.documentElement;!function(){var b,c,e,f,g,h,i=d.createElement("div"),j=d.createElement("div");if(j.style){j.style.cssText="float:left;opacity:.5",l.opacity="0.5"===j.style.opacity,l.cssFloat=!!j.style.cssFloat,j.style.backgroundClip="content-box",j.cloneNode(!0).style.backgroundClip="",l.clearCloneStyle="content-box"===j.style.backgroundClip,i=d.createElement("div"),i.style.cssText="border:0;width:8px;height:0;top:0;left:-9999px;padding:0;margin-top:1px;position:absolute",j.innerHTML="",i.appendChild(j),l.boxSizing=""===j.style.boxSizing||""===j.style.MozBoxSizing||""===j.style.WebkitBoxSizing,n.extend(l,{reliableHiddenOffsets:function(){return null==b&&k(),f},boxSizingReliable:function(){return null==b&&k(),e},pixelMarginRight:function(){ret
 urn null==b&&k(),c},pixelPosition:function(){return null==b&&k(),b},reliableMarginRight:function(){return null==b&&k(),g},reliableMarginLeft:function(){return null==b&&k(),h}});function k(){var k,l,m=d.documentElement;m.appendChild(i),j.style.cssText="-webkit-box-sizing:border-box;box-sizing:border-box;position:relative;display:block;margin:auto;border:1px;padding:1px;top:1%;width:50%",b=e=h=!1,c=g=!0,a.getComputedStyle&&(l=a.getComputedStyle(j),b="1%"!==(l||{}).top,h="2px"===(l||{}).marginLeft,e="4px"===(l||{width:"4px"}).width,j.style.marginRight="50%",c="4px"===(l||{marginRight:"4px"}).marginRight,k=j.appendChild(d.createElement("div")),k.style.cssText=j.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",k.style.marginRight=k.style.width="0",j.style.width="1px",g=!parseFloat((a.getComputedStyle(k)||{}).marginRight),j.removeChild(k)),j.style.display="none",f=0===j.getClientRects().length,f&&(j
 .style.display="",j.innerHTML="<table><tr><td></td><td>t</td></tr></table>",j.childNodes[0].style.borderCollapse="separate",k=j.getElementsByTagName("td"),k[0].style.cssText="margin:0;border:0;padding:0;display:none",f=0===k[0].offsetHeight,f&&(k[0].style.display="",k[1].style.display="none",f=0===k[0].offsetHeight)),m.removeChild(i)}}}();var Ra,Sa,Ta=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ra=function(b){var c=b.ownerDocument.defaultView;return c&&c.opener||(c=a),c.getComputedStyle(b)},Sa=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ra(a),g=c?c.getPropertyValue(b)||c[b]:void 0,""!==g&&void 0!==g||n.contains(a.ownerDocument,a)||(g=n.style(a,b)),c&&!l.pixelMarginRight()&&Oa.test(g)&&Na.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f),void 0===g?g:g+""}):Qa.currentStyle&&(Ra=function(a){return a.currentStyle},Sa=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ra(a),g=c?c[b]:void 0,null==g&&h&&h[
 b]&&(g=h[b]),Oa.test(g)&&!Ta.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 0===g?g:g+""||"auto"});function Ua(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}var Va=/alpha\([^)]*\)/i,Wa=/opacity\s*=\s*([^)]*)/i,Xa=/^(none|table(?!-c[ea]).+)/,Ya=new RegExp("^("+T+")(.*)$","i"),Za={position:"absolute",visibility:"hidden",display:"block"},$a={letterSpacing:"0",fontWeight:"400"},_a=["Webkit","O","Moz","ms"],ab=d.createElement("div").style;function bb(a){if(a in ab)return a;var b=a.charAt(0).toUpperCase()+a.slice(1),c=_a.length;while(c--)if(a=_a[c]+b,a in ab)return a}function cb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=n._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&W(d)&&(f[g]=n._data(d,"olddisplay",Ma(d.nodeName)))):(e=W(d),(c&&"none"!==c||!e)&
 &n._data(d,"olddisplay",e?c:n.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function db(a,b,c){var d=Ya.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function eb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=n.css(a,c+V[f],!0,e)),d?("content"===c&&(g-=n.css(a,"padding"+V[f],!0,e)),"margin"!==c&&(g-=n.css(a,"border"+V[f]+"Width",!0,e))):(g+=n.css(a,"padding"+V[f],!0,e),"padding"!==c&&(g+=n.css(a,"border"+V[f]+"Width",!0,e)));return g}function fb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ra(a),g=l.boxSizing&&"border-box"===n.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Sa(a,b,f),(0>e||null==e)&&(e=a.style[b]),Oa.test(e))return e;d=g&&(l.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+eb(a,b,c||(g?"border":"content"),d,f)+"px"}n.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=S
 a(a,"opacity");return""===c?"1":c}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":l.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=n.camelCase(b),i=a.style;if(b=n.cssProps[h]||(n.cssProps[h]=bb(h)||h),g=n.cssHooks[b]||n.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=U.exec(c))&&e[1]&&(c=X(a,b,e),f="number"),null!=c&&c===c&&("number"===f&&(c+=e&&e[3]||(n.cssNumber[h]?"":"px")),l.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=n.camelCase(b);return b=n.cssProps[h]||(n.cssProps[h]=bb(h)||h),g=n.cssHooks[b]||n.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Sa(a,b,d)),"n
 ormal"===f&&b in $a&&(f=$a[b]),""===c||c?(e=parseFloat(f),c===!0||isFinite(e)?e||0:f):f}}),n.each(["height","width"],function(a,b){n.cssHooks[b]={get:function(a,c,d){return c?Xa.test(n.css(a,"display"))&&0===a.offsetWidth?Pa(a,Za,function(){return fb(a,b,d)}):fb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ra(a);return db(a,c,d?eb(a,b,d,l.boxSizing&&"border-box"===n.css(a,"boxSizing",!1,e),e):0)}}}),l.opacity||(n.cssHooks.opacity={get:function(a,b){return Wa.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=n.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===n.trim(f.replace(Va,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Va.test(f)?f.replace(Va,e):f+" "+e)}}),n.cssHooks.marginRight=Ua(l.reliableMarginRight,function(a,b){return b?Pa(a,{display:"inline-block"},Sa,[a,"marginRight"]):void 0
 }),n.cssHooks.marginLeft=Ua(l.reliableMarginLeft,function(a,b){return b?(parseFloat(Sa(a,"marginLeft"))||(n.contains(a.ownerDocument,a)?a.getBoundingClientRect().left-Pa(a,{
+marginLeft:0},function(){return a.getBoundingClientRect().left}):0))+"px":void 0}),n.each({margin:"",padding:"",border:"Width"},function(a,b){n.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+V[d]+b]=f[d]||f[d-2]||f[0];return e}},Na.test(a)||(n.cssHooks[a+b].set=db)}),n.fn.extend({css:function(a,b){return Y(this,function(a,b,c){var d,e,f={},g=0;if(n.isArray(b)){for(d=Ra(a),e=b.length;e>g;g++)f[b[g]]=n.css(a,b[g],!1,d);return f}return void 0!==c?n.style(a,b,c):n.css(a,b)},a,b,arguments.length>1)},show:function(){return cb(this,!0)},hide:function(){return cb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){W(this)?n(this).show():n(this).hide()})}});function gb(a,b,c,d,e){return new gb.prototype.init(a,b,c,d,e)}n.Tween=gb,gb.prototype={constructor:gb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||n.easing._default,this.options=b,this.start=this.now=this.cur(),this.e
 nd=d,this.unit=f||(n.cssNumber[c]?"":"px")},cur:function(){var a=gb.propHooks[this.prop];return a&&a.get?a.get(this):gb.propHooks._default.get(this)},run:function(a){var b,c=gb.propHooks[this.prop];return this.options.duration?this.pos=b=n.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):gb.propHooks._default.set(this),this}},gb.prototype.init.prototype=gb.prototype,gb.propHooks={_default:{get:function(a){var b;return 1!==a.elem.nodeType||null!=a.elem[a.prop]&&null==a.elem.style[a.prop]?a.elem[a.prop]:(b=n.css(a.elem,a.prop,""),b&&"auto"!==b?b:0)},set:function(a){n.fx.step[a.prop]?n.fx.step[a.prop](a):1!==a.elem.nodeType||null==a.elem.style[n.cssProps[a.prop]]&&!n.cssHooks[a.prop]?a.elem[a.prop]=a.now:n.style(a.elem,a.prop,a.now+a.unit)}}},gb.propHooks.scrollTop=gb.propHooks.scrollLeft={set:function(a){a.elem.nodeTyp
 e&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},n.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2},_default:"swing"},n.fx=gb.prototype.init,n.fx.step={};var hb,ib,jb=/^(?:toggle|show|hide)$/,kb=/queueHooks$/;function lb(){return a.setTimeout(function(){hb=void 0}),hb=n.now()}function mb(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=V[e],d["margin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function nb(a,b,c){for(var d,e=(qb.tweeners[b]||[]).concat(qb.tweeners["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ob(a,b,c){var d,e,f,g,h,i,j,k,m=this,o={},p=a.style,q=a.nodeType&&W(a),r=n._data(a,"fxshow");c.queue||(h=n._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,m.always(function(){m.always(function(){h.unqueued--,n.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.
 overflowY],j=n.css(a,"display"),k="none"===j?n._data(a,"olddisplay")||Ma(a.nodeName):j,"inline"===k&&"none"===n.css(a,"float")&&(l.inlineBlockNeedsLayout&&"inline"!==Ma(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",l.shrinkWrapBlocks()||m.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],jb.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||n.style(a,d)}else j=void 0;if(n.isEmptyObject(o))"inline"===("none"===j?Ma(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=n._data(a,"fxshow",{}),f&&(r.hidden=!q),q?n(a).show():m.done(function(){n(a).hide()}),m.done(function(){var b;n._removeData(a,"fxshow");for(b in o)n.style(a,b,o[b])});for(d in o)g=nb(q?r[d]:0,d,m),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function pb(a,b){var c,d,e,f,g;for(c in a)if(d=n.camelCa
 se(c),e=b[d],f=a[c],n.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=n.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function qb(a,b,c){var d,e,f=0,g=qb.prefilters.length,h=n.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=hb||lb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:n.extend({},b),opts:n.extend(!0,{specialEasing:{},easing:n.easing._default},c),originalProperties:b,originalOptions:c,startTime:hb||lb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=n.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?(h.notifyWith(a,[j,1,0]),h.resolveWith(a,[j,b])):h.reje
 ctWith(a,[j,b]),this}}),k=j.props;for(pb(k,j.opts.specialEasing);g>f;f++)if(d=qb.prefilters[f].call(j,a,k,j.opts))return n.isFunction(d.stop)&&(n._queueHooks(j.elem,j.opts.queue).stop=n.proxy(d.stop,d)),d;return n.map(k,nb,j),n.isFunction(j.opts.start)&&j.opts.start.call(a,j),n.fx.timer(n.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}n.Animation=n.extend(qb,{tweeners:{"*":[function(a,b){var c=this.createTween(a,b);return X(c.elem,a,U.exec(b),c),c}]},tweener:function(a,b){n.isFunction(a)?(b=a,a=["*"]):a=a.match(G);for(var c,d=0,e=a.length;e>d;d++)c=a[d],qb.tweeners[c]=qb.tweeners[c]||[],qb.tweeners[c].unshift(b)},prefilters:[ob],prefilter:function(a,b){b?qb.prefilters.unshift(a):qb.prefilters.push(a)}}),n.speed=function(a,b,c){var d=a&&"object"==typeof a?n.extend({},a):{complete:c||!c&&b||n.isFunction(a)&&a,duration:a,easing:c&&b||b&&!n.isFunction(b)&&b};return d.duration=n.fx.off?0:
 "number"==typeof d.duration?d.duration:d.duration in n.fx.speeds?n.fx.speeds[d.duration]:n.fx.speeds._default,null!=d.queue&&d.queue!==!0||(d.queue="fx"),d.old=d.complete,d.complete=function(){n.isFunction(d.old)&&d.old.call(this),d.queue&&n.dequeue(this,d.queue)},d},n.fn.extend({fadeTo:function(a,b,c,d){return this.filter(W).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=n.isEmptyObject(a),f=n.speed(b,c,d),g=function(){var b=qb(this,n.extend({},a),f);(e||n._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=n.timers,g=n._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&kb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].a
 nim.stop(c),b=!1,f.splice(e,1));!b&&c||n.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=n._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=n.timers,g=d?d.length:0;for(c.finish=!0,n.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),n.each(["toggle","show","hide"],function(a,b){var c=n.fn[b];n.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(mb(b,!0),a,d,e)}}),n.each({slideDown:mb("show"),slideUp:mb("hide"),slideToggle:mb("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){n.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),n.timers=[],n.fx.tick=function(){var a,b=n.timers,c=0;for(hb=n.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||n.fx.stop(),hb=void 0},n.
 fx.timer=function(a){n.timers.push(a),a()?n.fx.start():n.timers.pop()},n.fx.interval=13,n.fx.start=function(){ib||(ib=a.setInterval(n.fx.tick,n.fx.interval))},n.fx.stop=function(){a.clearInterval(ib),ib=null},n.fx.speeds={slow:600,fast:200,_default:400},n.fn.delay=function(b,c){return b=n.fx?n.fx.speeds[b]||b:b,c=c||"fx",this.queue(c,function(c,d){var e=a.setTimeout(c,b);d.stop=function(){a.clearTimeout(e)}})},function(){var a,b=d.createElement("input"),c=d.createElement("div"),e=d.createElement("select"),f=e.appendChild(d.createElement("option"));c=d.createElement("div"),c.setAttribute("className","t"),c.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",a=c.getElementsByTagName("a")[0],b.setAttribute("type","checkbox"),c.appendChild(b),a=c.getElementsByTagName("a")[0],a.style.cssText="top:1px",l.getSetAttribute="t"!==c.className,l.style=/top/.test(a.getAttribute("style")),l.hrefNormalized="/a"===a.getAttribute("href"),l.checkOn=!!b.value,l.optSelected=f
 .selected,l.enctype=!!d.createElement("form").enctype,e.disabled=!0,l.optDisabled=!f.disabled,b=d.createElement("input"),b.setAttribute("value",""),l.input=""===b.getAttribute("value"),b.value="t",b.setAttribute("type","radio"),l.radioValue="t"===b.value}();var rb=/\r/g,sb=/[\x20\t\r\n\f]+/g;n.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=n.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,n(this).val()):a,null==e?e="":"number"==typeof e?e+="":n.isArray(e)&&(e=n.map(e,function(a){return null==a?"":a+""})),b=n.valHooks[this.type]||n.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=n.valHooks[e.type]||n.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(rb,""):null==c?"":c)}}}),n.extend({valHooks:{option:{get:function(a){var b=n.find.attr(a,"value");return null!=b?b:n.trim(n.text(a)).replac
 e(sb," ")}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],(c.selected||i===e)&&(l.optDisabled?!c.disabled:null===c.getAttribute("disabled"))&&(!c.parentNode.disabled||!n.nodeName(c.parentNode,"optgroup"))){if(b=n(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=n.makeArray(b),g=e.length;while(g--)if(d=e[g],n.inArray(n.valHooks.option.get(d),f)>-1)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),n.each(["radio","checkbox"],function(){n.valHooks[this]={set:function(a,b){return n.isArray(b)?a.checked=n.inArray(n(a).val(),b)>-1:void 0}},l.checkOn||(n.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var tb,ub,vb=n.expr.attrHandle,wb=/^(?:checked|selected)$/i,xb=l.getSetAttribute,yb=l.input;n.fn.extend({attr:function(a,b){return Y(this,n.attr,a,b,arguments.length
 >1)},removeAttr:function(a){return this.each(function(){n.removeAttr(this,a)})}}),n.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return"undefined"==typeof a.getAttribute?n.prop(a,b,c):(1===f&&n.isXMLDoc(a)||(b=b.toLowerCase(),e=n.attrHooks[b]||(n.expr.match.bool.test(b)?ub:tb)),void 0!==c?null===c?void n.removeAttr(a,b):e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:(a.setAttribute(b,c+""),c):e&&"get"in e&&null!==(d=e.get(a,b))?d:(d=n.find.attr(a,b),null==d?void 0:d))},attrHooks:{type:{set:function(a,b){if(!l.radioValue&&"radio"===b&&n.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(G);if(f&&1===a.nodeType)while(c=f[e++])d=n.propFix[c]||c,n.expr.match.bool.test(c)?yb&&xb||!wb.test(c)?a[d]=!1:a[n.camelCase("default-"+c)]=a[d]=!1:n.attr(a,c,""),a.removeAttribute(xb?c:d)}}),ub={set:function(a,b,c){return b===!1?n.removeAttr(a,c):yb&&xb||!wb.test(c)?a.setAttribute(!xb&&n.prop
 Fix[c]||c,c):a[n.camelCase("default-"+c)]=a[c]=!0,c}},n.each(n.expr.match.bool.source.match(/\w+/g),function(a,b){var c=vb[b]||n.find.attr;yb&&xb||!wb.test(b)?vb[b]=function(a,b,d){var e,f;return d||(f=vb[b],vb[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,vb[b]=f),e}:vb[b]=function(a,b,c){return c?void 0:a[n.camelCase("default-"+b)]?b.toLowerCase():null}}),yb&&xb||(n.attrHooks.value={set:function(a,b,c){return n.nodeName(a,"input")?void(a.defaultValue=b):tb&&tb.set(a,b,c)}}),xb||(tb={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},vb.id=vb.name=vb.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},n.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:tb.set},n.attrHooks.contenteditable={set:function(a,b,c){tb.set(a,""===b?!1:b,c)}},n.each(["width","height"],fun
 ction(a,b){n.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),l.style||(n.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var zb=/^(?:input|select|textarea|button|object)$/i,Ab=/^(?:a|area)$/i;n.fn.extend({prop:function(a,b){return Y(this,n.prop,a,b,arguments.length>1)},removeProp:function(a){return a=n.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),n.extend({prop:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return 1===f&&n.isXMLDoc(a)||(b=n.propFix[b]||b,e=n.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=n.find.attr(a,"tabindex");return b?parseInt(b,10):zb.test(a.nodeName)||Ab.test(a.nodeName)&&a.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),l.hrefNormalized||n.each(["href","src"],function(a,b){n.propH
 ooks[b]={get:function(a){return a.getAttribute(b,4)}}}),l.optSelected||(n.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null},set:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex)}}),n.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){n.propFix[this.toLowerCase()]=this}),l.enctype||(n.propFix.enctype="encoding");var Bb=/[\t\r\n\f]/g;function Cb(a){return n.attr(a,"class")||""}n.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).addClass(a.call(this,b,Cb(this)))});if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=Cb(c),d=1===c.nodeType&&(" "+e+" ").replace(Bb," ")){g=0;while(f=b[g++])d.indexOf(" "+f+" ")<0&&(d+=f+" ");h=n.trim(d),e!==h&&n.attr(c,"class",h)}}return this},removeClass:function(a){var b,c,
 d,e,f,g,h,i=0;if(n.isFunction(a))return this.each(function(b){n(this).removeClass(a.call(this,b,Cb(this)))});if(!arguments.length)return this.attr("class","");if("string"==typeof a&&a){b=a.match(G)||[];while(c=this[i++])if(e=Cb(c),d=1===c.nodeType&&(" "+e+" ").replace(Bb," ")){g=0;while(f=b[g++])while(d.indexOf(" "+f+" ")>-1)d=d.replace(" "+f+" "," ");h=n.trim(d),e!==h&&n.attr(c,"class",h)}}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):n.isFunction(a)?this.each(function(c){n(this).toggleClass(a.call(this,c,Cb(this),b),b)}):this.each(function(){var b,d,e,f;if("string"===c){d=0,e=n(this),f=a.match(G)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else void 0!==a&&"boolean"!==c||(b=Cb(this),b&&n._data(this,"__className__",b),n.attr(this,"class",b||a===!1?"":n._data(this,"__className__")||""))})},hasClass:function(a){var b,c,d=0;b=" "+a+" ";while(c=this[d++])if(1===c.nodeType&&(" "+Cb
 (c)+" ").replace(Bb," ").indexOf(b)>-1)return!0;return!1}}),n.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){n.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),n.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var Db=a.location,Eb=n.now(),Fb=/\?/,Gb=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;n.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=n.trim(b+"");return e&&!n.trim(e.replace(Gb,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():n.error("Invalid JSON: "+b)},n.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new a.DOMParser,c=d.par
 seFromString(b,"text/xml")):(c=new a.ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||n.error("Invalid XML: "+b),c};var Hb=/#.*$/,Ib=/([?&])_=[^&]*/,Jb=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Kb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Lb=/^(?:GET|HEAD)$/,Mb=/^\/\//,Nb=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Ob={},Pb={},Qb="*/".concat("*"),Rb=Db.href,Sb=Nb.exec(Rb.toLowerCase())||[];function Tb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(G)||[];if(n.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Ub(a,b,c,d){var e={},f=a===Pb;function g(h){var i;return e[h]=!0,n.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"
 ]&&g("*")}function Vb(a,b){var c,d,e=n.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&n.extend(!0,a,c),a}function Wb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Xb(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g
 (b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}n.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Rb,type:"GET",isLocal:Kb.test(Sb[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Qb,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":n.parseJSON,"text xml":n.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Vb(Vb(a,n.ajaxSettings),b):Vb(n.ajaxSettings,a)},ajaxPrefilter:Tb(Ob),ajaxTransport:Tb(Pb),ajax:function(b,c){"object"==typeof b&&(c=b,b=void 0),c=c||{};var d,e,f,g,h,i,j,k,l=n.ajaxSetup({},c),m=l.context||l,o=l.context&&(m.nodeType||m.jquery)?n(m):n.event,p=
 n.Deferred(),q=n.Callbacks("once memory"),r=l.statusCode||{},s={},t={},u=0,v="canceled",w={readyState:0,getResponseHeader:function(a){var b;if(2===u){if(!k){k={};while(b=Jb.exec(g))k[b[1].toLowerCase()]=b[2]}b=k[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===u?g:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return u||(a=t[c]=t[c]||a,s[a]=b),this},overrideMimeType:function(a){return u||(l.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>u)for(b in a)r[b]=[r[b],a[b]];else w.always(a[w.status]);return this},abort:function(a){var b=a||v;return j&&j.abort(b),y(0,b),this}};if(p.promise(w).complete=q.add,w.success=w.done,w.error=w.fail,l.url=((b||l.url||Rb)+"").replace(Hb,"").replace(Mb,Sb[1]+"//"),l.type=c.method||c.type||l.method||l.type,l.dataTypes=n.trim(l.dataType||"*").toLowerCase().match(G)||[""],null==l.crossDomain&&(d=Nb.exec(l.url.toLowerCase()),l.crossDomain=!(!d||d[1]===Sb[1]&&d[2]===Sb[2]&&(d[3]||("http:"===d[1]?"80":"4
 43"))===(Sb[3]||("http:"===Sb[1]?"80":"443")))),l.data&&l.processData&&"string"!=typeof l.data&&(l.data=n.param(l.data,l.traditional)),Ub(Ob,l,c,w),2===u)return w;i=n.event&&l.global,i&&0===n.active++&&n.event.trigger("ajaxStart"),l.type=l.type.toUpperCase(),l.hasContent=!Lb.test(l.type),f=l.url,l.hasContent||(l.data&&(f=l.url+=(Fb.test(f)?"&":"?")+l.data,delete l.data),l.cache===!1&&(l.url=Ib.test(f)?f.replace(Ib,"$1_="+Eb++):f+(Fb.test(f)?"&":"?")+"_="+Eb++)),l.ifModified&&(n.lastModified[f]&&w.setRequestHeader("If-Modified-Since",n.lastModified[f]),n.etag[f]&&w.setRequestHeader("If-None-Match",n.etag[f])),(l.data&&l.hasContent&&l.contentType!==!1||c.contentType)&&w.setRequestHeader("Content-Type",l.contentType),w.setRequestHeader("Accept",l.dataTypes[0]&&l.accepts[l.dataTypes[0]]?l.accepts[l.dataTypes[0]]+("*"!==l.dataTypes[0]?", "+Qb+"; q=0.01":""):l.accepts["*"]);for(e in l.headers)w.setRequestHeader(e,l.headers[e]);if(l.beforeSend&&(l.beforeSend.call(m,w,l)===!1||2===u))return
  w.abort();v="abort";for(e in{success:1,error:1,complete:1})w[e](l[e]);if(j=Ub(Pb,l,c,w)){if(w.readyState=1,i&&o.trigger("ajaxSend",[w,l]),2===u)return w;l.async&&l.timeout>0&&(h=a.setTimeout(function(){w.abort("timeout")},l.timeout));try{u=1,j.send(s,y)}catch(x){if(!(2>u))throw x;y(-1,x)}}else y(-1,"No Transport");function y(b,c,d,e){var k,s,t,v,x,y=c;2!==u&&(u=2,h&&a.clearTimeout(h),j=void 0,g=e||"",w.readyState=b>0?4:0,k=b>=200&&300>b||304===b,d&&(v=Wb(l,w,d)),v=Xb(l,v,w,k),k?(l.ifModified&&(x=w.getResponseHeader("Last-Modified"),x&&(n.lastModified[f]=x),x=w.getResponseHeader("etag"),x&&(n.etag[f]=x)),204===b||"HEAD"===l.type?y="nocontent":304===b?y="notmodified":(y=v.state,s=v.data,t=v.error,k=!t)):(t=y,!b&&y||(y="error",0>b&&(b=0))),w.status=b,w.statusText=(c||y)+"",k?p.resolveWith(m,[s,y,w]):p.rejectWith(m,[w,y,t]),w.statusCode(r),r=void 0,i&&o.trigger(k?"ajaxSuccess":"ajaxError",[w,l,k?s:t]),q.fireWith(m,[w,y]),i&&(o.trigger("ajaxComplete",[w,l]),--n.active||n.event.trigger("
 ajaxStop")))}return w},getJSON:function(a,b,c){return n.get(a,b,c,"json")},getScript:function(a,b){return n.get(a,void 0,b,"script")}}),n.each(["get","post"],function(a,b){n[b]=function(a,c,d,e){return n.isFunction(c)&&(e=e||d,d=c,c=void 0),n.ajax(n.extend({url:a,type:b,dataType:e,data:c,success:d},n.isPlainObject(a)&&a))}}),n._evalUrl=function(a){return n.ajax({url:a,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},n.fn.extend({wrapAll:function(a){if(n.isFunction(a))return this.each(function(b){n(this).wrapAll(a.call(this,b))});if(this[0]){var b=n(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return n.isFunction(a)?this.each(function(b){n(this).wrapInner(a.call(this,b))}):this.each(function(){var b=n(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){v
 ar b=n.isFunction(a);return this.each(function(c){n(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){n.nodeName(this,"body")||n(this).replaceWith(this.childNodes)}).end()}});function Yb(a){return a.style&&a.style.display||n.css(a,"display")}function Zb(a){if(!n.contains(a.ownerDocument||d,a))return!0;while(a&&1===a.nodeType){if("none"===Yb(a)||"hidden"===a.type)return!0;a=a.parentNode}return!1}n.expr.filters.hidden=function(a){return l.reliableHiddenOffsets()?a.offsetWidth<=0&&a.offsetHeight<=0&&!a.getClientRects().length:Zb(a)},n.expr.filters.visible=function(a){return!n.expr.filters.hidden(a)};var $b=/%20/g,_b=/\[\]$/,ac=/\r?\n/g,bc=/^(?:submit|button|image|reset|file)$/i,cc=/^(?:input|select|textarea|keygen)/i;function dc(a,b,c,d){var e;if(n.isArray(b))n.each(b,function(b,e){c||_b.test(a)?d(a,e):dc(a+"["+("object"==typeof e&&null!=e?b:"")+"]",e,c,d)});else if(c||"object"!==n.type(b))

<TRUNCATED>



[47/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
new file mode 100644
index 0000000..ed3905e
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/css/bootstrap.min.css
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */
[remainder of minified Bootstrap v3.3.7 CSS omitted; line-wrapped by the mail archiver]
 laceholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control::-ms-expand{background-color:transparent;border:0}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=time].form-control,input[type=datetime-local].form-control,input[type=month].form-control{line-height:34px}.input-group-sm input[type=date],.input-group-sm input[type=time],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=time],.input-group-lg input[type=datetime-local],.input-g
 roup-lg input[type=month],input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disable
 d],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm .f
 orm-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-con
 trol-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm .form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color
 :#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error
 .radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-
 inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal .checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-ali
 gn:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba
 (0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.active,.btn-def
 ault:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default .badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggl
 e.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;bo
 rder-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.b
 tn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;
 border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning
 :focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-dang
 er.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm
 {padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;wid
 th:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-deco
 ration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bott
 om:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last
 -child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.b
 tn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-chi
 ld){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle
 =buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group .form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height
 :auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon
 {padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:no
 t(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-dec
 oration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-ta
 bs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-
 cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent
 ;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.
 container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;bord
 er-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15
 px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .rad
 io label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left
 ;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd
 }.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:f
 ocus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default .btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>
 .active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .na
 vbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color
 :#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid #ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pag
 ination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3p
 x;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{posit
 ion:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-
 1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;paddin
 g:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9ed
 f7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:ce
 nter;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:prog
 ress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-i
 mage:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-dang
 er{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{ver
 tical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media

<TRUNCATED>



[10/50] [abbrv] hadoop git commit: HDDS-38. Add SCMNodeStorage map in SCM class to store storage statistics per Datanode. Contributed by Shashikant Banerjee.

Posted by ar...@apache.org.
HDDS-38. Add SCMNodeStorage map in SCM class to store storage statistics per Datanode.
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c485a67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c485a67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c485a67

Branch: refs/heads/HDDS-48
Commit: 7c485a6701275578cb22392168b2b31726121ceb
Parents: e0367d3
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 17 16:13:28 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu May 17 16:13:28 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |  10 +
 .../common/src/main/resources/ozone-default.xml |  19 ++
 .../placement/metrics/SCMNodeStat.java          |  21 ++
 .../hdds/scm/node/SCMNodeStorageStatMXBean.java |  69 +++++
 .../hdds/scm/node/SCMNodeStorageStatMap.java    | 277 +++++++++++++++++++
 .../scm/node/TestSCMNodeStorageStatMap.java     | 188 +++++++++++++
 6 files changed, 584 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c485a67/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index b8dbd7b..d1377be 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -244,6 +244,16 @@ public final class OzoneConfigKeys {
   public static final String HDDS_DATANODE_PLUGINS_KEY =
       "hdds.datanode.plugins";
 
+  public static final String
+      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD =
+      "hdds.datanode.storage.utilization.warning.threshold";
+  public static final double
+      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.75;
+  public static final String
+      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD =
+      "hdds.datanode.storage.utilization.critical.threshold";
+  public static final double
+      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.95;
   /**
    * There is no need to instantiate this class.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c485a67/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 774b1b8..648ba05 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1057,5 +1057,24 @@
       HDDS service starts as part of datanode.
     </description>
   </property>
+  <property>
+    <name>hdds.datanode.storage.utilization.warning.threshold</name>
+    <value>0.75</value>
+    <tag>OZONE, SCM, MANAGEMENT</tag>
+    <description>
+      If a datanode's overall storage utilization exceeds this value, a
+      warning will be logged while SCM processes the node report.
+    </description>
+  </property>
+
+  <property>
+    <name>hdds.datanode.storage.utilization.critical.threshold</name>
+    <value>0.95</value>
+    <tag>OZONE, SCM, MANAGEMENT</tag>
+    <description>
+      If a datanode's overall storage utilization exceeds this value, the
+      datanode will be marked out of space.
+    </description>
+  </property>
 
 </configuration>
\ No newline at end of file

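For reference, these keys can be read back through OzoneConfiguration like any
other Hadoop configuration value. A minimal sketch under that assumption (the
wrapper class and printed labels are illustrative; the key and default
constants come from the OzoneConfigKeys diff above):

  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.ozone.OzoneConfigKeys;

  public class ThresholdConfigSketch {
    public static void main(String[] args) {
      OzoneConfiguration conf = new OzoneConfiguration();
      // Falls back to the compiled-in default when neither ozone-site.xml
      // nor ozone-default.xml overrides the key.
      double warn = conf.getDouble(
          OzoneConfigKeys.HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD,
          OzoneConfigKeys
              .HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT);
      double critical = conf.getDouble(
          OzoneConfigKeys.HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD,
          OzoneConfigKeys
              .HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
      System.out.println("warning threshold = " + warn);
      System.out.println("critical threshold = " + critical);
    }
  }
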
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c485a67/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index 3c871d3..4fe72fc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -136,4 +136,25 @@ public class SCMNodeStat implements NodeStat {
   public int hashCode() {
     return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get());
   }
+
+
+  /**
+   * Truncates to four decimal places, since uncontrolled precision is
+   * sometimes counterintuitive to what users expect.
+   * @param value - value to truncate.
+   * @return the truncated value.
+   */
+  private double truncateDecimals(double value) {
+    final int multiplier = 10000;
+    return (double) ((long) (value * multiplier)) / multiplier;
+  }
+
+  /**
+   * Gets the ratio of used space to capacity, truncated to 4 decimals.
+   */
+  public double getScmUsedratio() {
+    double scmUsedRatio =
+        truncateDecimals(getScmUsed().get() / (double) getCapacity().get());
+    return scmUsedRatio;
+  }
 }

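To make the truncation concrete: a used/capacity ratio of 1/3 comes out as
0.3333 rather than 0.3333333..., while exact ratios such as 2/10 pass through
unchanged. A standalone sketch of the same arithmetic (class name
hypothetical):

  public class TruncateSketch {
    // Same arithmetic as SCMNodeStat#truncateDecimals above.
    private static double truncateDecimals(double value) {
      final int multiplier = 10000;
      return (double) ((long) (value * multiplier)) / multiplier;
    }

    public static void main(String[] args) {
      System.out.println(truncateDecimals(1.0 / 3.0));  // prints 0.3333
      System.out.println(truncateDecimals(2.0 / 10.0)); // prints 0.2
    }
  }
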
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c485a67/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
new file mode 100644
index 0000000..f17a970
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.util.UUID;
+
+/**
+ *
+ * This is the JMX management interface for SCM node storage information.
+ */
+@InterfaceAudience.Private
+public interface SCMNodeStorageStatMXBean {
+  /**
+   * Returns the capacity of a Datanode.
+   * @param datanodeID Datanode Id
+   * @return capacity in bytes
+   */
+  long getCapacity(UUID datanodeID);
+
+  /**
+   * Returns the remaining space of a Datanode.
+   * @param datanodeId Datanode Id
+   * @return remaining space in bytes
+   */
+  long getRemainingSpace(UUID datanodeId);
+
+  /**
+   * Returns the used space of a Datanode.
+   * @param datanodeId Datanode Id
+   * @return used space in bytes
+   */
+  long getUsedSpace(UUID datanodeId);
+
+  /**
+   * Returns the total capacity of all Datanodes.
+   * @return total capacity in bytes
+   */
+  long getTotalCapacity();
+
+  /**
+   * Returns the total used space across all Datanodes.
+   * @return total used space in bytes
+   */
+  long getTotalSpaceUsed();
+
+  /**
+   * Returns the total remaining space across all Datanodes.
+   * @return total remaining space in bytes
+   */
+  long getTotalFreeSpace();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c485a67/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
new file mode 100644
index 0000000..25cb357
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.ObjectName;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE;
+
+/**
+ * This data structure maintains the disk space capacity, disk usage and free
+ * space availability per Datanode.
+ * This information is built from the DN node reports.
+ */
+public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
+  static final Logger LOG =
+      LoggerFactory.getLogger(SCMNodeStorageStatMap.class);
+
+  private final double warningUtilizationThreshold;
+  private final double criticalUtilizationThreshold;
+
+  private final Map<UUID, SCMNodeStat> scmNodeStorageStatMap;
+  // NodeStorageInfo MXBean
+  private ObjectName scmNodeStorageInfoBean;
+  // Aggregated node stats
+  private SCMNodeStat clusterStat;
+
+  /**
+   * Constructs an SCMNodeStorageStatMap object.
+   *
+   * @param conf Ozone configuration
+   */
+  public SCMNodeStorageStatMap(OzoneConfiguration conf) {
+    scmNodeStorageStatMap = new ConcurrentHashMap<>();
+    warningUtilizationThreshold = conf.getDouble(
+        OzoneConfigKeys.
+            HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD,
+        OzoneConfigKeys.
+            HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT);
+    criticalUtilizationThreshold = conf.getDouble(
+        OzoneConfigKeys.
+            HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD,
+        OzoneConfigKeys.
+            HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
+    clusterStat = new SCMNodeStat();
+  }
+
+  public enum UtilizationThreshold {
+    NORMAL, WARN, CRITICAL;
+  }
+
+  /**
+   * Returns true if this is a datanode that is already tracked by
+   * scmNodeStorageStatMap.
+   *
+   * @param datanodeID - UUID of the Datanode.
+   * @return True if this is tracked, false if this map does not know about it.
+   */
+  public boolean isKnownDatanode(UUID datanodeID) {
+    Preconditions.checkNotNull(datanodeID);
+    return scmNodeStorageStatMap.containsKey(datanodeID);
+  }
+
+  public List<UUID> getDatanodeList(
+      UtilizationThreshold threshold) {
+    return scmNodeStorageStatMap.entrySet().stream()
+        .filter(entry -> (isThresholdReached(threshold, entry.getValue())))
+        .map(Map.Entry::getKey)
+        .collect(Collectors.toList());
+  }
+
+  /**
+   * Inserts a new datanode into the storage stat map.
+   *
+   * @param datanodeID - Datanode UUID
+   * @param stat - scmNode stat for the Datanode.
+   * @throws SCMException - if the datanode already exists in the map.
+   */
+  public void insertNewDatanode(UUID datanodeID, SCMNodeStat stat)
+      throws SCMException {
+    Preconditions.checkNotNull(stat);
+    Preconditions.checkNotNull(datanodeID);
+    synchronized (scmNodeStorageStatMap) {
+      if (isKnownDatanode(datanodeID)) {
+        throw new SCMException("Node already exists in the map",
+            DUPLICATE_DATANODE);
+      }
+      scmNodeStorageStatMap.put(datanodeID, stat);
+      clusterStat.add(stat);
+    }
+  }
+
+  //TODO: This should be called once SCMNodeManager gets started.
+  private void registerMXBean() {
+    this.scmNodeStorageInfoBean = MBeans.register("StorageContainerManager",
+        "scmNodeStorageInfo", this);
+  }
+
+  //TODO: Unregister call should happen as a part of SCMNodeManager shutdown.
+  private void unregisterMXBean() {
+    if(this.scmNodeStorageInfoBean != null) {
+      MBeans.unregister(this.scmNodeStorageInfoBean);
+      this.scmNodeStorageInfoBean = null;
+    }
+  }
+
+  /**
+   * Updates the storage stats of an existing datanode.
+   *
+   * @param datanodeID - UUID of the Datanode.
+   * @param stat - scmNode stat for the Datanode.
+   * @throws SCMException - if we don't know about this datanode; for a new
+   *                      DN use insertNewDatanode.
+   */
+  public void updateDatanodeMap(UUID datanodeID, SCMNodeStat stat)
+      throws SCMException {
+    Preconditions.checkNotNull(datanodeID);
+    Preconditions.checkNotNull(stat);
+    synchronized (scmNodeStorageStatMap) {
+      if (!scmNodeStorageStatMap.containsKey(datanodeID)) {
+        throw new SCMException("No such datanode", NO_SUCH_DATANODE);
+      }
+      SCMNodeStat removed = scmNodeStorageStatMap.get(datanodeID);
+      clusterStat.subtract(removed);
+      scmNodeStorageStatMap.put(datanodeID, stat);
+      clusterStat.add(stat);
+    }
+  }
+
+  public NodeReportStatus processNodeReport(UUID datanodeID,
+      StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport)
+      throws SCMException {
+    Preconditions.checkNotNull(datanodeID);
+    Preconditions.checkNotNull(nodeReport);
+    long totalCapacity = 0;
+    long totalRemaining = 0;
+    long totalScmUsed = 0;
+    List<StorageContainerDatanodeProtocolProtos.SCMStorageReport>
+        storageReports = nodeReport.getStorageReportList();
+    for (StorageContainerDatanodeProtocolProtos.SCMStorageReport report : storageReports) {
+      totalCapacity += report.getCapacity();
+      totalRemaining += report.getRemaining();
+      totalScmUsed += report.getScmUsed();
+    }
+    SCMNodeStat stat = scmNodeStorageStatMap.get(datanodeID);
+    if (stat == null) {
+      stat = new SCMNodeStat();
+      stat.set(totalCapacity, totalScmUsed, totalRemaining);
+      insertNewDatanode(datanodeID, stat);
+    } else {
+      stat.set(totalCapacity, totalScmUsed, totalRemaining);
+      updateDatanodeMap(datanodeID, stat);
+    }
+    if (isThresholdReached(UtilizationThreshold.CRITICAL, stat)) {
+      LOG.warn("Datanode {} is out of storage space. Capacity: {}, Used: {}",
+          datanodeID, stat.getCapacity().get(), stat.getScmUsed().get());
+      return NodeReportStatus.DATANODE_OUT_OF_SPACE;
+    } else {
+      if (isThresholdReached(UtilizationThreshold.WARN, stat)) {
+        LOG.warn("Datanode {} is low on storage space. Capacity: {}, Used: {}",
+            datanodeID, stat.getCapacity().get(), stat.getScmUsed().get());
+      }
+      return NodeReportStatus.ALL_IS_WELL;
+    }
+  }
+
+  private boolean isThresholdReached(UtilizationThreshold threshold,
+      SCMNodeStat stat) {
+    switch (threshold) {
+    case NORMAL:
+      return stat.getScmUsedratio() < warningUtilizationThreshold;
+    case WARN:
+      return stat.getScmUsedratio() >= warningUtilizationThreshold &&
+          stat.getScmUsedratio() < criticalUtilizationThreshold;
+    case CRITICAL:
+      return stat.getScmUsedratio() >= criticalUtilizationThreshold;
+    default:
+      throw new RuntimeException("Unknown UtilizationThreshold value");
+    }
+  }
+
+  @Override
+  public long getCapacity(UUID dnId) {
+    return scmNodeStorageStatMap.get(dnId).getCapacity().get();
+  }
+
+  @Override
+  public long getRemainingSpace(UUID dnId) {
+    return scmNodeStorageStatMap.get(dnId).getRemaining().get();
+  }
+
+  @Override
+  public long getUsedSpace(UUID dnId) {
+    return scmNodeStorageStatMap.get(dnId).getScmUsed().get();
+  }
+
+  @Override
+  public long getTotalCapacity() {
+    return clusterStat.getCapacity().get();
+  }
+
+  @Override
+  public long getTotalSpaceUsed() {
+    return clusterStat.getScmUsed().get();
+  }
+
+  @Override
+  public long getTotalFreeSpace() {
+    return clusterStat.getRemaining().get();
+  }
+
+  /**
+   * Removes the datanode from scmNodeStorageStatMap.
+   * @param datanodeID - UUID of the Datanode.
+   * @throws SCMException in case the datanode is not found in the map.
+   */
+  public void removeDatanode(UUID datanodeID) throws SCMException {
+    Preconditions.checkNotNull(datanodeID);
+    synchronized (scmNodeStorageStatMap) {
+      if (!scmNodeStorageStatMap.containsKey(datanodeID)) {
+        throw new SCMException("No such datanode", NO_SUCH_DATANODE);
+      }
+      SCMNodeStat stat = scmNodeStorageStatMap.remove(datanodeID);
+      clusterStat.subtract(stat);
+    }
+  }
+
+  /**
+   * Gets the SCMNodeStat for the datanode.
+   * @param datanodeID - UUID of the Datanode.
+   * @return SCMNodeStat
+   */
+  SCMNodeStat getNodeStat(UUID datanodeID) {
+    return scmNodeStorageStatMap.get(datanodeID);
+  }
+
+  /**
+   * Results possible from processing a node report by
+   * SCMNodeStorageStatMap.
+   */
+  public enum NodeReportStatus {
+    ALL_IS_WELL,
+    DATANODE_OUT_OF_SPACE
+  }
+
+}

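A minimal sketch of how the map is exercised end to end, mirroring the unit
test that follows (the datanode ID and sizes are illustrative):

  import java.util.List;
  import java.util.UUID;

  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
  import org.apache.hadoop.hdds.scm.exceptions.SCMException;
  import org.apache.hadoop.ozone.OzoneConsts;

  public class StatMapSketch {
    public static void main(String[] args) throws SCMException {
      SCMNodeStorageStatMap map =
          new SCMNodeStorageStatMap(new OzoneConfiguration());

      // Track one datanode: 10 GB capacity, 2 GB used, 8 GB remaining.
      UUID datanode = UUID.randomUUID();
      SCMNodeStat stat = new SCMNodeStat();
      stat.set(10L * OzoneConsts.GB, 2L * OzoneConsts.GB, 8L * OzoneConsts.GB);
      map.insertNewDatanode(datanode, stat);

      // Aggregated cluster totals maintained alongside the per-node map.
      System.out.println("total capacity = " + map.getTotalCapacity());
      System.out.println("total used     = " + map.getTotalSpaceUsed());

      // Nodes bucketed by utilization band; a 0.2 used ratio is NORMAL here.
      List<UUID> normal = map.getDatanodeList(
          SCMNodeStorageStatMap.UtilizationThreshold.NORMAL);
      System.out.println("normal nodes   = " + normal.size());
    }
  }
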
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c485a67/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
new file mode 100644
index 0000000..03bfbab
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.junit.*;
+import org.junit.rules.ExpectedException;
+
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class TestSCMNodeStorageStatMap {
+  private final static int DATANODE_COUNT = 300;
+  final long capacity = 10L * OzoneConsts.GB;
+  final long used = 2L * OzoneConsts.GB;
+  final long remaining = capacity - used;
+  private static OzoneConfiguration conf = new OzoneConfiguration();
+  private final Map<UUID, SCMNodeStat> testData = new ConcurrentHashMap<>();
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private void generateData() {
+    SCMNodeStat stat = new SCMNodeStat();
+    stat.set(capacity, used, remaining);
+    for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
+      testData.put(UUID.randomUUID(), stat);
+    }
+  }
+
+  private UUID getFirstKey() {
+    return testData.keySet().iterator().next();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    generateData();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testIsKnownDatanode() throws SCMException {
+    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
+    UUID knownNode = getFirstKey();
+    UUID unknownNode = UUID.randomUUID();
+    SCMNodeStat stat = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, stat);
+    Assert.assertTrue("Not able to detect a known node",
+        map.isKnownDatanode(knownNode));
+    Assert.assertFalse("Unknown node detected",
+        map.isKnownDatanode(unknownNode));
+  }
+
+  @Test
+  public void testInsertNewDatanode() throws SCMException {
+    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
+    UUID knownNode = getFirstKey();
+    SCMNodeStat stat = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, stat);
+    Assert.assertEquals(map.getNodeStat(knownNode).getScmUsed(),
+        testData.get(knownNode).getScmUsed());
+    thrown.expect(SCMException.class);
+    thrown.expectMessage("already exists");
+    map.insertNewDatanode(knownNode, stat);
+  }
+
+  @Test
+  public void testUpdateUnknownDatanode() throws SCMException {
+    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
+    UUID unknownNode = UUID.randomUUID();
+    SCMNodeStat stat = new SCMNodeStat();
+
+    thrown.expect(SCMException.class);
+    thrown.expectMessage("No such datanode");
+    map.updateDatanodeMap(unknownNode, stat);
+  }
+
+  @Test
+  public void testProcessNodeReportCheckOneNode() throws SCMException {
+    UUID key = getFirstKey();
+    SCMNodeStat value = testData.get(key);
+    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
+    map.insertNewDatanode(key, value);
+    Assert.assertTrue(map.isKnownDatanode(key));
+    SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
+    SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
+    srb.setStorageUuid(UUID.randomUUID().toString());
+    srb.setCapacity(value.getCapacity().get())
+        .setScmUsed(value.getScmUsed().get()).
+        setRemaining(value.getRemaining().get()).build();
+    SCMNodeStorageStatMap.NodeReportStatus status =
+        map.processNodeReport(key, nrb.addStorageReport(srb).build());
+    Assert.assertEquals(status,
+        SCMNodeStorageStatMap.NodeReportStatus.ALL_IS_WELL);
+  }
+
+  @Test
+  public void testProcessNodeReportAndSCMStats() throws SCMException {
+    SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
+    int counter = 1;
+    // Insert all testData into the SCMNodeStorageStatMap Map.
+    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
+      map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
+    }
+    Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity());
+    Assert.assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace());
+    Assert.assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed());
+
+    // update 1/4th of the datanodes to be full
+    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
+      SCMNodeStat stat = new SCMNodeStat(capacity, capacity, 0);
+      map.updateDatanodeMap(keyEntry.getKey(), stat);
+      counter++;
+      if (counter > DATANODE_COUNT / 4) {
+        break;
+      }
+    }
+    Assert.assertEquals(DATANODE_COUNT / 4,
+        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL)
+            .size());
+    Assert.assertEquals(0,
+        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN)
+            .size());
+    Assert.assertEquals(0.75 * DATANODE_COUNT,
+        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL)
+            .size(), 0);
+
+    Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
+    Assert.assertEquals(0.75 * DATANODE_COUNT * remaining,
+        map.getTotalFreeSpace(), 0);
+    Assert.assertEquals(
+        0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * capacity),
+        map.getTotalSpaceUsed(), 0);
+    counter = 1;
+    // Remove 1/4 of the DataNodes from the Map
+    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
+      map.removeDatanode(keyEntry.getKey());
+      counter++;
+      if (counter > DATANODE_COUNT / 4) {
+        break;
+      }
+    }
+
+    Assert.assertEquals(0,
+        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL)
+            .size());
+    Assert.assertEquals(0,
+        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN)
+            .size());
+    Assert.assertEquals(0.75 * DATANODE_COUNT,
+        map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL)
+            .size(), 0);
+
+    Assert.assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
+    Assert.assertEquals(0.75 * DATANODE_COUNT * remaining,
+        map.getTotalFreeSpace(), 0);
+    Assert.assertEquals(
+        0.75 * DATANODE_COUNT * used,
+        map.getTotalSpaceUsed(), 0);
+
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[31/50] [abbrv] hadoop git commit: HDDS-87: Fix test failures with uninitialized storageLocation field in storageReport. Contributed by Shashikant Banerjee.

Posted by ar...@apache.org.
HDDS-87: Fix test failures with uninitialized storageLocation field in storageReport. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d2d9dbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d2d9dbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d2d9dbc

Branch: refs/heads/HDDS-48
Commit: 3d2d9dbcaa73fd72d614a8cf5a5be2806dd31537
Parents: c97df77
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon May 21 08:01:51 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon May 21 08:01:51 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/scm/TestUtils.java   | 35 +++++++++---
 .../hdds/scm/node/TestContainerPlacement.java   | 13 ++---
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 58 ++++++++++----------
 .../scm/node/TestSCMNodeStorageStatMap.java     | 22 +++++---
 .../ozone/container/common/TestEndPoint.java    | 32 ++++++-----
 5 files changed, 92 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index ab94ef6..5cf0a92 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -16,10 +16,13 @@
  */
 package org.apache.hadoop.hdds.scm;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol
     .proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 
@@ -58,19 +61,35 @@ public final class TestUtils {
    * Create Node Report object.
    * @return SCMNodeReport
    */
-  public static SCMNodeReport createNodeReport() {
+  public static SCMNodeReport createNodeReport(List<SCMStorageReport> reports) {
     SCMNodeReport.Builder nodeReport = SCMNodeReport.newBuilder();
-    for (int i = 0; i < 1; i++) {
+    nodeReport.addAllStorageReport(reports);
+    return nodeReport.build();
+  }
+
+  /**
+   * Creates a list of SCM storage report objects.
+   * @return list of SCMStorageReport
+   */
+  public static List<SCMStorageReport> createStorageReport(long capacity,
+      long used, long remaining, String path, StorageTypeProto type, String id,
+      int count) {
+    List<SCMStorageReport> reportList = new ArrayList<>();
+    for (int i = 0; i < count; i++) {
+      Preconditions.checkNotNull(path);
+      Preconditions.checkNotNull(id);
       SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-      nodeReport.addStorageReport(i, srb.setStorageUuid("disk")
-          .setCapacity(100)
-          .setScmUsed(10)
-          .setRemaining(90)
-          .build());
+      srb.setStorageUuid(id).setStorageLocation(path).setCapacity(capacity)
+          .setScmUsed(used).setRemaining(remaining);
+      StorageTypeProto storageTypeProto =
+          type == null ? StorageTypeProto.DISK : type;
+      srb.setStorageType(storageTypeProto);
+      reportList.add(srb.build());
     }
-    return nodeReport.build();
+    return reportList;
   }
 
+
   /**
    * Get specified number of DatanodeDetails and registered them with node
    * manager.

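With the refactored helpers, a test can assemble a complete node report in two
calls. A minimal sketch, assuming the test classpath (the storage path and
sizes are illustrative):

  import java.util.List;
  import java.util.UUID;

  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
  import org.apache.hadoop.hdds.scm.TestUtils;

  public class NodeReportSketch {
    public static void main(String[] args) {
      String id = UUID.randomUUID().toString();
      // One report: capacity 100, used 10, remaining 90; a null type falls
      // back to StorageTypeProto.DISK inside createStorageReport.
      List<SCMStorageReport> reports = TestUtils.createStorageReport(
          100, 10, 90, "/tmp/" + id, null, id, 1);
      SCMNodeReport nodeReport = TestUtils.createNodeReport(reports);
      System.out.println(nodeReport.getStorageReportCount()); // prints 1
    }
  }
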
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 6f994a9..321e4e2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -36,8 +36,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ReportState;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -140,13 +138,12 @@ public class TestContainerPlacement {
         TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
     try {
       for (DatanodeDetails datanodeDetails : datanodes) {
-        SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-        SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-        srb.setStorageUuid(UUID.randomUUID().toString());
-        srb.setCapacity(capacity).setScmUsed(used).
-            setRemaining(remaining).build();
+        String id = UUID.randomUUID().toString();
+        String path = testDir.getAbsolutePath() + "/" + id;
+        List<SCMStorageReport> reports = TestUtils
+            .createStorageReport(capacity, used, remaining, path, null, id, 1);
         nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
-            nrb.addStorageReport(srb).build(), reportState);
+            TestUtils.createNodeReport(reports), reportState);
       }
 
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 117c258..9fe38ce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -281,9 +281,13 @@ public class TestNodeManager {
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    String dnId = datanodeDetails.getUuidString();
+    String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+    List<SCMStorageReport> reports =
+        TestUtils.createStorageReport(100, 10, 90, storagePath, null, dnId, 1);
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       nodemanager.register(datanodeDetails.getProtoBufMessage(),
-          TestUtils.createNodeReport());
+          TestUtils.createNodeReport(reports));
       List<SCMCommand> command = nodemanager.sendHeartbeat(
           datanodeDetails.getProtoBufMessage(),
           null, reportState);
@@ -1012,14 +1016,14 @@ public class TestNodeManager {
       for (int x = 0; x < nodeCount; x++) {
         DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
             nodeManager);
-
-        SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-        SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-        srb.setStorageUuid(UUID.randomUUID().toString());
-        srb.setCapacity(capacity).setScmUsed(used).
-            setRemaining(capacity - used).build();
+        String dnId = datanodeDetails.getUuidString();
+        long free = capacity - used;
+        String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+        List<SCMStorageReport> reports = TestUtils
+            .createStorageReport(capacity, used, free, storagePath,
+                null, dnId, 1);
         nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
-            nrb.addStorageReport(srb).build(), reportState);
+            TestUtils.createNodeReport(reports), reportState);
       }
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
           100, 4 * 1000);
@@ -1055,21 +1059,21 @@ public class TestNodeManager {
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
-          nodeManager);
+      DatanodeDetails datanodeDetails =
+          TestUtils.getDatanodeDetails(nodeManager);
       final long capacity = 2000;
       final long usedPerHeartbeat = 100;
-
+      String dnId = datanodeDetails.getUuidString();
       for (int x = 0; x < heartbeatCount; x++) {
-        SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-        SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-        srb.setStorageUuid(UUID.randomUUID().toString());
-        srb.setCapacity(capacity).setScmUsed(x * usedPerHeartbeat)
-            .setRemaining(capacity - x * usedPerHeartbeat).build();
-        nrb.addStorageReport(srb);
-
-        nodeManager.sendHeartbeat(
-            datanodeDetails.getProtoBufMessage(), nrb.build(), reportState);
+        long scmUsed = x * usedPerHeartbeat;
+        long remaining = capacity - scmUsed;
+        String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+        List<SCMStorageReport> reports = TestUtils
+            .createStorageReport(capacity, scmUsed, remaining, storagePath,
+                null, dnId, 1);
+
+        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+            TestUtils.createNodeReport(reports), reportState);
         Thread.sleep(100);
       }
 
@@ -1145,14 +1149,12 @@ public class TestNodeManager {
       assertEquals(0, foundRemaining);
 
       // Send a new report to bring the dead node back to healthy
-      SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-      srb.setStorageUuid(UUID.randomUUID().toString());
-      srb.setCapacity(capacity).setScmUsed(expectedScmUsed)
-          .setRemaining(expectedRemaining).build();
-      nrb.addStorageReport(srb);
-      nodeManager.sendHeartbeat(
-          datanodeDetails.getProtoBufMessage(), nrb.build(), reportState);
+      String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+      List<SCMStorageReport> reports = TestUtils
+          .createStorageReport(capacity, expectedScmUsed, expectedRemaining,
+              storagePath, null, dnId, 1);
+      nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+          TestUtils.createNodeReport(reports), reportState);
 
       // Wait up to 5 seconds so that the dead node becomes healthy
       // Verify usage info should be updated.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 03bfbab..2fa786b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -18,16 +18,17 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.*;
 import org.junit.rules.ExpectedException;
 
+import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
@@ -108,14 +109,17 @@ public class TestSCMNodeStorageStatMap {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     map.insertNewDatanode(key, value);
     Assert.assertTrue(map.isKnownDatanode(key));
-    SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-    SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-    srb.setStorageUuid(UUID.randomUUID().toString());
-    srb.setCapacity(value.getCapacity().get())
-        .setScmUsed(value.getScmUsed().get()).
-        setRemaining(value.getRemaining().get()).build();
+    String storageId = UUID.randomUUID().toString();
+    String path =
+        GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
+    long capacity = value.getCapacity().get();
+    long used = value.getScmUsed().get();
+    long remaining = value.getRemaining().get();
+    List<SCMStorageReport> reports = TestUtils
+        .createStorageReport(capacity, used, remaining, path, null, storageId,
+            1);
     SCMNodeStorageStatMap.NodeReportStatus status =
-        map.processNodeReport(key, nrb.addStorageReport(srb).build());
+        map.processNodeReport(key, TestUtils.createNodeReport(reports));
     Assert.assertEquals(status,
         SCMNodeStorageStatMap.NodeReportStatus.ALL_IS_WELL);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2d9dbc/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index da39bb3..9ac1467 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -66,6 +66,7 @@ import static org.mockito.Mockito.mock;
 
 import java.io.File;
 import java.net.InetSocketAddress;
+import java.util.List;
 import java.util.UUID;
 
 import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails;
@@ -207,12 +208,12 @@ public class TestEndPoint {
   @Test
   public void testRegister() throws Exception {
     DatanodeDetails nodeToRegister = getDatanodeDetails();
-    try (EndpointStateMachine rpcEndPoint =
-             createEndpoint(
-                 SCMTestUtils.getConf(), serverAddress, 1000)) {
+    try (EndpointStateMachine rpcEndPoint = createEndpoint(
+        SCMTestUtils.getConf(), serverAddress, 1000)) {
       SCMRegisteredCmdResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .register(nodeToRegister.getProtoBufMessage(),
-              TestUtils.createNodeReport(),
+          .register(nodeToRegister.getProtoBufMessage(), TestUtils
+                  .createNodeReport(
+                      getStorageReports(nodeToRegister.getUuidString())),
               createContainerReport(10, nodeToRegister));
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(nodeToRegister.getUuidString(),
@@ -220,11 +221,15 @@ public class TestEndPoint {
       Assert.assertNotNull(responseProto.getClusterID());
       Assert.assertEquals(10, scmServerImpl.
           getContainerCountsForDatanode(nodeToRegister));
-      Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(
-          nodeToRegister));
+      Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(nodeToRegister));
     }
   }
 
+  private List<SCMStorageReport> getStorageReports(String id) {
+    String storagePath = testDir.getAbsolutePath() + "/" + id;
+    return TestUtils.createStorageReport(100, 10, 90, storagePath, null, id, 1);
+  }
+
   private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
       int rpcTimeout, boolean clearDatanodeDetails) throws Exception {
     Configuration conf = SCMTestUtils.getConf();
@@ -234,7 +239,7 @@ public class TestEndPoint {
     rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER);
     OzoneContainer ozoneContainer = mock(OzoneContainer.class);
     when(ozoneContainer.getNodeReport()).thenReturn(TestUtils
-        .createNodeReport());
+        .createNodeReport(getStorageReports(UUID.randomUUID().toString())));
     when(ozoneContainer.getContainerReport()).thenReturn(
         createContainerReport(10, null));
     RegisterEndpointTask endpointTask =
@@ -297,14 +302,11 @@ public class TestEndPoint {
     try (EndpointStateMachine rpcEndPoint =
              createEndpoint(SCMTestUtils.getConf(),
                  serverAddress, 1000)) {
-      SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
-      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-      srb.setStorageUuid(UUID.randomUUID().toString());
-      srb.setCapacity(2000).setScmUsed(500).setRemaining(1500).build();
-      nrb.addStorageReport(srb);
+      String storageId = UUID.randomUUID().toString();
       SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .sendHeartbeat(
-              dataNode.getProtoBufMessage(), nrb.build(), defaultReportState);
+          .sendHeartbeat(dataNode.getProtoBufMessage(),
+              TestUtils.createNodeReport(getStorageReports(storageId)),
+              defaultReportState);
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(0, responseProto.getCommandsCount());
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/50] [abbrv] hadoop git commit: HDDS-76. Modify SCMStorageReportProto to include the data dir paths as well as the StorageType info. Contributed by Shashikant Banerjee.

Posted by ar...@apache.org.
HDDS-76. Modify SCMStorageReportProto to include the data dir paths as well as the StorageType info. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e996867
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e996867
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e996867

Branch: refs/heads/HDDS-48
Commit: 6e996867f641b297e3f88068f6d185b709d509b0
Parents: 3159bff
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Fri May 18 14:08:46 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Fri May 18 14:08:46 2018 +0530

----------------------------------------------------------------------
 .../impl/ContainerLocationManagerImpl.java      |  18 +-
 .../common/impl/ContainerManagerImpl.java       |   9 +-
 .../common/impl/ContainerStorageLocation.java   |   9 +
 .../common/impl/StorageLocationReport.java      | 235 ++++++++++++++++++-
 .../StorageContainerDatanodeProtocol.proto      |  21 +-
 5 files changed, 275 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e996867/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
index e0e826c..5f5b81f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
@@ -108,12 +108,14 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager,
 
   @Override
   public StorageLocationReport[] getLocationReport() throws IOException {
+    boolean failed;
     StorageLocationReport[] reports =
         new StorageLocationReport[dataLocations.size()];
     for (int idx = 0; idx < dataLocations.size(); idx++) {
       ContainerStorageLocation loc = dataLocations.get(idx);
       long scmUsed = 0;
       long remaining = 0;
+      failed = false;
       try {
         scmUsed = loc.getScmUsed();
         remaining = loc.getAvailable();
@@ -123,13 +125,19 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager,
         // reset scmUsed and remaining if df/du failed.
         scmUsed = 0;
         remaining = 0;
+        failed = true;
       }
 
-      // TODO: handle failed storage
-      // For now, include storage report for location that failed to get df/du.
-      StorageLocationReport r = new StorageLocationReport(
-          loc.getStorageUuId(), false, loc.getCapacity(),
-          scmUsed, remaining);
+      StorageLocationReport.Builder builder =
+          StorageLocationReport.newBuilder();
+      builder.setStorageLocation(loc.getStorageLocation())
+          .setId(loc.getStorageUuId())
+          .setFailed(failed)
+          .setCapacity(loc.getCapacity())
+          .setRemaining(remaining)
+          .setScmUsed(scmUsed)
+          .setStorageType(loc.getStorageType());
+      StorageLocationReport r = builder.build();
       reports[idx] = r;
     }
     return reports;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e996867/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 240beba..039b4c3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
@@ -38,6 +39,8 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
@@ -818,11 +821,7 @@ public class ContainerManagerImpl implements ContainerManager {
     SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
     for (int i = 0; i < reports.length; i++) {
       SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
-      nrb.addStorageReport(i, srb.setStorageUuid(reports[i].getId())
-          .setCapacity(reports[i].getCapacity())
-          .setScmUsed(reports[i].getScmUsed())
-          .setRemaining(reports[i].getRemaining())
-          .build());
+      nrb.addStorageReport(reports[i].getProtoBufMessage());
     }
     return nrb.build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e996867/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java
index 7293895..7431baa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CachingGetSpaceUsed;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.GetSpaceUsed;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
@@ -121,6 +122,14 @@ public class ContainerStorageLocation {
     return scmUsage.getUsed();
   }
 
+  public String getStorageLocation() {
+    return getNormalizedUri().getRawPath();
+  }
+
+  public StorageType getStorageType() {
+    return dataLocation.getStorageType();
+  }
+
   public void shutdown() {
     saveScmUsed();
     scmUsedSaved = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e996867/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 7ef91a9..a5ad6c2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -18,26 +18,38 @@
 
 package org.apache.hadoop.ozone.container.common.impl;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.StorageTypeProto;
+
+import java.io.IOException;
+
 /**
  * Storage location stats of datanodes that provide back store for containers.
  *
  */
 public class StorageLocationReport {
-  public static final StorageLocationReport[] EMPTY_ARRAY = {};
 
   private final String id;
   private final boolean failed;
   private final long capacity;
   private final long scmUsed;
   private final long remaining;
+  private final StorageType storageType;
+  private final String storageLocation;
 
-  public StorageLocationReport(String id, boolean failed,
-      long capacity, long scmUsed, long remaining) {
+  private StorageLocationReport(String id, boolean failed, long capacity,
+      long scmUsed, long remaining, StorageType storageType,
+      String storageLocation) {
     this.id = id;
     this.failed = failed;
     this.capacity = capacity;
     this.scmUsed = scmUsed;
     this.remaining = remaining;
+    this.storageType = storageType;
+    this.storageLocation = storageLocation;
   }
 
   public String getId() {
@@ -60,4 +72,221 @@ public class StorageLocationReport {
     return remaining;
   }
 
+  public String getStorageLocation() {
+    return storageLocation;
+  }
+
+  public StorageType getStorageType() {
+    return storageType;
+  }
+
+  private StorageTypeProto getStorageTypeProto() throws
+      IOException {
+    StorageTypeProto storageTypeProto;
+    switch (getStorageType()) {
+    case SSD:
+      storageTypeProto = StorageTypeProto.SSD;
+      break;
+    case DISK:
+      storageTypeProto = StorageTypeProto.DISK;
+      break;
+    case ARCHIVE:
+      storageTypeProto = StorageTypeProto.ARCHIVE;
+      break;
+    case PROVIDED:
+      storageTypeProto = StorageTypeProto.PROVIDED;
+      break;
+    case RAM_DISK:
+      storageTypeProto = StorageTypeProto.RAM_DISK;
+      break;
+    default:
+      throw new IOException("Illegal Storage Type specified");
+    }
+    return storageTypeProto;
+  }
+
+  private static StorageType getStorageType(StorageTypeProto proto) throws
+      IOException {
+    StorageType storageType;
+    switch (proto) {
+    case SSD:
+      storageType = StorageType.SSD;
+      break;
+    case DISK:
+      storageType = StorageType.DISK;
+      break;
+    case ARCHIVE:
+      storageType = StorageType.ARCHIVE;
+      break;
+    case PROVIDED:
+      storageType = StorageType.PROVIDED;
+      break;
+    case RAM_DISK:
+      storageType = StorageType.RAM_DISK;
+      break;
+    default:
+      throw new IOException("Illegal Storage Type specified");
+    }
+    return storageType;
+  }
+
+  /**
+   * Returns the SCMStorageReport protoBuf message for the Storage Location
+   * report.
+   * @return SCMStorageReport
+   * @throws IOException In case, the storage type specified is invalid.
+   */
+  public SCMStorageReport getProtoBufMessage() throws IOException {
+    SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
+    return srb.setStorageUuid(getId())
+        .setCapacity(getCapacity())
+        .setScmUsed(getScmUsed())
+        .setRemaining(getRemaining())
+        .setStorageType(getStorageTypeProto())
+        .setStorageLocation(getStorageLocation())
+        .setFailed(isFailed())
+        .build();
+  }
+
+  /**
+   * Returns the StorageLocationReport from the protoBuf message.
+   * @param report SCMStorageReport
+   * @return StorageLocationReport
+   * @throws IOException in case of invalid storage type
+   */
+  public static StorageLocationReport getFromProtobuf(SCMStorageReport report)
+      throws IOException {
+    StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+    builder.setId(report.getStorageUuid())
+        .setStorageLocation(report.getStorageLocation());
+    if (report.hasCapacity()) {
+      builder.setCapacity(report.getCapacity());
+    }
+    if (report.hasScmUsed()) {
+      builder.setScmUsed(report.getScmUsed());
+    }
+    if (report.hasStorageType()) {
+      builder.setStorageType(getStorageType(report.getStorageType()));
+    }
+    if (report.hasRemaining()) {
+      builder.setRemaining(report.getRemaining());
+    }
+
+    if (report.hasFailed()) {
+      builder.setFailed(report.getFailed());
+    }
+    return builder.build();
+  }
+
+  /**
+   * Returns a StorageLocationReport.Builder instance.
+   *
+   * @return StorageLocationReport.Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder class for building StorageLocationReport.
+   */
+  public static class Builder {
+    private String id;
+    private boolean failed;
+    private long capacity;
+    private long scmUsed;
+    private long remaining;
+    private StorageType storageType;
+    private String storageLocation;
+
+    /**
+     * Sets the storageId.
+     *
+     * @param id storageId
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setId(String id) {
+      this.id = id;
+      return this;
+    }
+
+    /**
+     * Sets whether the volume failed or not.
+     *
+     * @param failed whether volume failed or not
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setFailed(boolean failed) {
+      this.failed = failed;
+      return this;
+    }
+
+    /**
+     * Sets the capacity of volume.
+     *
+     * @param capacity capacity
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setCapacity(long capacity) {
+      this.capacity = capacity;
+      return this;
+    }
+
+    /**
+     * Sets the scmUsed Value.
+     *
+     * @param scmUsed storage space used by scm
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setScmUsed(long scmUsed) {
+      this.scmUsed = scmUsed;
+      return this;
+    }
+
+    /**
+     * Sets the remaining free space value.
+     *
+     * @param remaining remaining free space
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setRemaining(long remaining) {
+      this.remaining = remaining;
+      return this;
+    }
+
+    /**
+     * Sets the storageType.
+     *
+     * @param storageType type of the storage used
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setStorageType(StorageType storageType) {
+      this.storageType = storageType;
+      return this;
+    }
+
+    /**
+     * Sets the storageLocation.
+     *
+     * @param storageLocation location of the volume
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setStorageLocation(String storageLocation) {
+      this.storageLocation = storageLocation;
+      return this;
+    }
+
+    /**
+     * Builds and returns StorageLocationReport instance.
+     *
+     * @return StorageLocationReport
+     */
+    public StorageLocationReport build() {
+      return new StorageLocationReport(id, failed, capacity, scmUsed,
+          remaining, storageType, storageLocation);
+    }
+
+  }
+
 }
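
As a quick illustration of the builder-based API introduced above, here is a
minimal usage sketch. It is hypothetical: the id and path values are made up,
it assumes the code lives in the same package as `StorageLocationReport`, and
the import paths are assumed from the surrounding Hadoop tree.

```java
import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;

public class StorageLocationReportSketch {
  public static void main(String[] args) throws IOException {
    // Build an immutable report for one volume (all values hypothetical).
    StorageLocationReport report = StorageLocationReport.newBuilder()
        .setId("storage-1")                      // hypothetical storage UUID
        .setFailed(false)
        .setCapacity(100L * 1024 * 1024 * 1024)  // 100 GiB
        .setScmUsed(10L * 1024 * 1024 * 1024)
        .setRemaining(90L * 1024 * 1024 * 1024)
        .setStorageType(StorageType.DISK)
        .setStorageLocation("/data/disk1")       // hypothetical mount point
        .build();

    // Round-trip through the protobuf message used in the node report.
    SCMStorageReport proto = report.getProtoBufMessage();
    StorageLocationReport parsed = StorageLocationReport.getFromProtobuf(proto);
    assert parsed.getScmUsed() == report.getScmUsed();
  }
}
```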

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e996867/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index f44abc9..2b34d11 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -136,12 +136,25 @@ message SCMNodeReport {
   repeated SCMStorageReport storageReport = 1;
 }
 
+/**
+ * Types of recognized storage media.
+ */
+enum StorageTypeProto {
+  DISK = 1;
+  SSD = 2;
+  ARCHIVE = 3;
+  RAM_DISK = 4;
+  PROVIDED = 5;
+}
+
 message SCMStorageReport {
   required string storageUuid = 1;
-  optional uint64 capacity = 2 [default = 0];
-  optional uint64 scmUsed = 3 [default = 0];
-  optional uint64 remaining = 4 [default = 0];
-  //optional hadoop.hdfs.StorageTypeProto storageType = 5 [default = DISK];
+  required string storageLocation = 2;
+  optional uint64 capacity = 3 [default = 0];
+  optional uint64 scmUsed = 4 [default = 0];
+  optional uint64 remaining = 5 [default = 0];
+  optional StorageTypeProto storageType = 6 [default = DISK];
+  optional bool failed = 7 [default = false];
 }
 
 message SCMRegisterRequestProto {
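
For reference, a short hypothetical Java fragment showing what the optional
fields and their declared defaults mean for consumers of `SCMStorageReport`
(same assumed imports and made-up values as the sketch above):

```java
// Only the two required fields are populated; every optional field reads
// back as its declared default, while has*() still reports it as unset.
SCMStorageReport minimal = SCMStorageReport.newBuilder()
    .setStorageUuid("storage-1")        // hypothetical UUID
    .setStorageLocation("/data/disk1")  // hypothetical path
    .build();

assert minimal.getCapacity() == 0;
assert minimal.getStorageType() == StorageTypeProto.DISK;
assert !minimal.getFailed();
assert !minimal.hasCapacity();
```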




[30/50] [abbrv] hadoop git commit: HDDS-57. TestContainerCloser#testRepeatedClose and TestContainerCloser#testCleanupThreadRuns fail consistently. Contributed by Shashikant Banerjee.

Posted by ar...@apache.org.
HDDS-57. TestContainerCloser#testRepeatedClose and TestContainerCloser#testCleanupThreadRuns fail consistently. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c97df771
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c97df771
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c97df771

Branch: refs/heads/HDDS-48
Commit: c97df7712ce35938c2f4ccbbdc60c6671a7a67b0
Parents: ba84284
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Mon May 21 19:06:26 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Mon May 21 19:06:26 2018 +0530

----------------------------------------------------------------------
 .../hadoop/hdds/scm/container/closer/TestContainerCloser.java     | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97df771/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index f3f37c7..15ecbad 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -213,8 +213,7 @@ public class TestContainerCloser {
         .setReadCount(100000000L)
         .setWriteCount(100000000L)
         .setReadBytes(2000000000L)
-        .setWriteBytes(2000000000L)
-        .setContainerID(1L);
+        .setWriteBytes(2000000000L);
     reports.setDatanodeDetails(
         TestUtils.getDatanodeDetails().getProtoBufMessage());
     reports.addReports(ciBuilder);




[49/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
HDDS-89. Create ozone specific inline documentation as part of the build.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481bfdb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481bfdb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481bfdb9

Branch: refs/heads/HDDS-48
Commit: 481bfdb94ff2dd3038fd20b1604358ac78e422d4
Parents: 6176d2b
Author: Anu Engineer <ae...@apache.org>
Authored: Tue May 22 10:49:10 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue May 22 10:49:10 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |   2 +
 dev-support/bin/ozone-dist-layout-stitching     |   4 +
 hadoop-dist/pom.xml                             |   6 +
 hadoop-ozone/docs/README.md                     |  52 ++
 hadoop-ozone/docs/archetypes/default.md         |  17 +
 hadoop-ozone/docs/config.toml                   |  23 +
 hadoop-ozone/docs/content/CommandShell.md       | 153 +++++
 hadoop-ozone/docs/content/GettingStarted.md     | 352 ++++++++++++
 hadoop-ozone/docs/content/Metrics.md            | 170 ++++++
 hadoop-ozone/docs/content/Rest.md               | 553 +++++++++++++++++++
 hadoop-ozone/docs/content/_index.md             | 102 ++++
 .../docs/dev-support/bin/generate-site.sh       |  29 +
 hadoop-ozone/docs/pom.xml                       |  58 ++
 hadoop-ozone/docs/static/OzoneOverview.png      | Bin 0 -> 41729 bytes
 hadoop-ozone/docs/static/OzoneOverview.svg      | 225 ++++++++
 hadoop-ozone/docs/static/SCMBlockDiagram.png    | Bin 0 -> 14714 bytes
 .../ozonedoc/layouts/_default/single.html       |  32 ++
 .../docs/themes/ozonedoc/layouts/index.html     |  21 +
 .../ozonedoc/layouts/partials/footer.html       |  19 +
 .../ozonedoc/layouts/partials/header.html       |  31 ++
 .../ozonedoc/layouts/partials/navbar.html       |  33 ++
 .../ozonedoc/layouts/partials/sidebar.html      |  43 ++
 .../ozonedoc/static/css/bootstrap-theme.min.css |   6 +
 .../static/css/bootstrap-theme.min.css.map      |   1 +
 .../ozonedoc/static/css/bootstrap.min.css       |   6 +
 .../ozonedoc/static/css/bootstrap.min.css.map   |   1 +
 .../themes/ozonedoc/static/css/ozonedoc.css     | 128 +++++
 .../fonts/glyphicons-halflings-regular.eot      | Bin 0 -> 20127 bytes
 .../fonts/glyphicons-halflings-regular.svg      | 288 ++++++++++
 .../fonts/glyphicons-halflings-regular.ttf      | Bin 0 -> 45404 bytes
 .../fonts/glyphicons-halflings-regular.woff     | Bin 0 -> 23424 bytes
 .../fonts/glyphicons-halflings-regular.woff2    | Bin 0 -> 18028 bytes
 .../themes/ozonedoc/static/js/bootstrap.min.js  |   7 +
 .../themes/ozonedoc/static/js/jquery.min.js     |   5 +
 .../docs/themes/ozonedoc/static/js/ozonedoc.js  |  23 +
 hadoop-ozone/docs/themes/ozonedoc/theme.toml    |   2 +
 .../src/main/site/markdown/OzoneCommandShell.md | 150 -----
 .../site/markdown/OzoneGettingStarted.md.vm     | 347 ------------
 .../src/main/site/markdown/OzoneMetrics.md      | 166 ------
 .../src/main/site/markdown/OzoneOverview.md     |  88 ---
 .../src/main/site/markdown/OzoneRest.md         | 549 ------------------
 hadoop-ozone/pom.xml                            |   1 +
 hadoop-project/pom.xml                          |   5 +
 43 files changed, 2398 insertions(+), 1300 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 3883ce2..934c009 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,3 +53,5 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log
 log.html
 output.xml
 report.html
+
+hadoop-ozone/docs/public

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/dev-support/bin/ozone-dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
index 1b0b224..ad8abe2 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -145,6 +145,10 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
 run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
 run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+# Optional documentation, could be missing
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
+
 
 mkdir -p ./share/hadoop/mapreduce
 mkdir -p ./share/hadoop/yarn

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 999d44c..41e040f 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -68,6 +68,12 @@
       <artifactId>hadoop-client-integration-tests</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-docs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
 
 
   </dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/README.md b/hadoop-ozone/docs/README.md
new file mode 100644
index 0000000..426789f
--- /dev/null
+++ b/hadoop-ozone/docs/README.md
@@ -0,0 +1,52 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+# Hadoop Ozone/HDDS docs
+
+This subproject contains the inline documentation for Ozone/HDDS components.
+
+You can create a new page with:
+
+```
+hugo new content/title.md
+```
+
+You can check the rendering with:
+
+```
+hugo serve
+```
+
+This maven project will create the rendered HTML pages during the build (only if hugo is available),
+and the dist project will include the documentation.
+
+You can adjust the menu hierarchy by adjusting the header of the markdown file:
+
+To show it in the main header add the menu entry:
+
+```
+---
+menu: main
+---
+```
+
+To show it as a subpage, you can set the parent. (The value could be the title of the parent page,
+or you can define an `id: ...` in the parent markdown and use that in the parent reference.)
+
+```
+---
+menu:
+   main:
+	   parent: "Getting started"
+---
+```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/archetypes/default.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/archetypes/default.md b/hadoop-ozone/docs/archetypes/default.md
new file mode 100644
index 0000000..e67e68a
--- /dev/null
+++ b/hadoop-ozone/docs/archetypes/default.md
@@ -0,0 +1,17 @@
+---
+title: "{{ replace .Name "-" " " | title }}"
+menu: main
+---
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/config.toml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/config.toml b/hadoop-ozone/docs/config.toml
new file mode 100644
index 0000000..eed74a9
--- /dev/null
+++ b/hadoop-ozone/docs/config.toml
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+baseURL = "/"
+
+languageCode = "en-us"
+DefaultContentLanguage = "en"
+title = "Ozone"
+theme = "ozonedoc"
+pygmentsCodeFences = true
+pygmentsStyle = "monokailight"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/content/CommandShell.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/CommandShell.md b/hadoop-ozone/docs/content/CommandShell.md
new file mode 100644
index 0000000..d8a733a
--- /dev/null
+++ b/hadoop-ozone/docs/content/CommandShell.md
@@ -0,0 +1,153 @@
+---
+title: Command Shell
+menu: main
+---
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+Ozone Command Shell
+===================
+
+The Ozone command shell gives a command-line interface to work against ozone.
+Please note that this document assumes that the cluster is deployed
+with simple authentication.
+
+The Ozone commands take the following format.
+
+* `ozone oz --command_ http://hostname:port/volume/bucket/key -user
+<name> -root`
+
+The *port* specified in the command should match the port mentioned in the config
+property `hdds.rest.http-address`. This property can be set in `ozone-site.xml`.
+The default value for the port is `9880` and is used in below commands.
+
+The *-root* option is a command line shortcut that allows *ozone oz*
+commands to be run as the user that started the cluster. This is useful to
+indicate that you want the commands to be run as some admin user. The only
+reason for this option is that it makes the life of a lazy developer
+easier.
+
+Ozone Volume Commands
+--------------------
+
+The volume commands allow users to create, delete and list the volumes in the
+ozone cluster.
+
+### Create Volume
+
+Volumes can be created only by Admins. Here is an example of creating a volume.
+
+* `ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota
+100TB -root`
+
+The above command creates a volume called `hive` owned by user `bilbo`. The
+`-root` option allows the command to be executed as user `hdfs` which is an
+admin in the cluster.
+
+### Update Volume
+
+Updates information like ownership and quota on an existing volume.
+
+* `ozone oz  -updateVolume  http://localhost:9880/hive -quota 500TB -root`
+
+The above command changes the volume quota of hive from 100TB to 500TB.
+
+### Delete Volume
+Deletes a Volume if it is empty.
+
+* `ozone oz -deleteVolume http://localhost:9880/hive -root`
+
+
+### Info Volume
+Info volume command allows the owner or the administrator of the cluster to read meta-data about a specific volume.
+
+* `ozone oz -infoVolume http://localhost:9880/hive -root`
+
+### List Volumes
+
+The list volume command can be used by an administrator to list volumes of any user. It can also be used by a user to list their own volumes.
+
+* `ozone oz -listVolume http://localhost:9880/ -user bilbo -root`
+
+The above command lists all volumes owned by user bilbo.
+
+Ozone Bucket Commands
+--------------------
+
+Bucket commands follow a similar pattern to volume commands. However, bucket commands are designed to be run by the owner of the volume.
+The following examples assume that these commands are run by the owner of the volume or bucket.
+
+
+### Create Bucket
+
+Create bucket call allows the owner of a volume to create a bucket.
+
+* `ozone oz -createBucket http://localhost:9880/hive/january`
+
+This call creates a bucket called `january` in the volume called `hive`. If
+the volume does not exist, then this call will fail.
+
+
+### Update Bucket
+Updates bucket meta-data, like ACLs.
+
+* `ozone oz -updateBucket http://localhost:9880/hive/january  -addAcl
+user:spark:rw`
+
+### Delete Bucket
+Deletes a bucket if it is empty.
+
+* `ozone oz -deleteBucket http://localhost:9880/hive/january`
+
+### Info Bucket
+Returns information about a given bucket.
+
+* `ozone oz -infoBucket http://localhost:9880/hive/january`
+
+### List Buckets
+List buckets on a given volume.
+
+* `ozone oz -listBucket http://localhost:9880/hive`
+
+Ozone Key Commands
+------------------
+
+Ozone key commands allow users to put, delete and get keys from ozone buckets.
+
+### Put Key
+Creates or overwrites a key in the ozone store; *-file* points to the file you
+want to upload.
+
+* `ozone oz -putKey  http://localhost:9880/hive/january/processed.orc  -file
+processed.orc`
+
+### Get Key
+Downloads a file from the ozone bucket.
+
+* `ozone oz -getKey  http://localhost:9880/hive/january/processed.orc  -file
+  processed.orc.copy`
+
+### Delete Key
+Deletes a key from the ozone store.
+
+* `ozone oz -deleteKey http://localhost:9880/hive/january/processed.orc`
+
+### Info Key
+Reads key metadata from the ozone store.
+
+* `ozone oz -infoKey http://localhost:9880/hive/january/processed.orc`
+
+### List Keys
+List all keys in an ozone bucket.
+
+* `ozone oz -listKey  http://localhost:9880/hive/january`

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/content/GettingStarted.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/GettingStarted.md b/hadoop-ozone/docs/content/GettingStarted.md
new file mode 100644
index 0000000..6b2316e
--- /dev/null
+++ b/hadoop-ozone/docs/content/GettingStarted.md
@@ -0,0 +1,352 @@
+---
+title: Getting started
+weight: -2
+menu: main
+---
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Ozone - Object store for Hadoop
+==============================
+
+Introduction
+------------
+Ozone is an object store for Hadoop. It is a redundant, distributed object
+store built by leveraging primitives present in HDFS. Ozone supports a REST
+API for accessing the store.
+
+Getting Started
+---------------
+Ozone is a work in progress and currently lives in the hadoop source tree.
+The subprojects (ozone/hdds) are part of the hadoop source tree but are not
+compiled by default and are not part of the official releases. To
+use it, you have to build a package yourself and deploy a cluster.
+
+### Building Ozone
+
+To build Ozone, please check out the hadoop sources from github. Then
+check out the trunk branch and build it.
+
+`mvn clean package -DskipTests=true -Dmaven.javadoc.skip=true -Pdist -Phdds -Dtar -DskipShade`
+
+skipShade is just to make compilation faster and not really required.
+
+This will give you a tarball in your distribution directory. This is the
+tarball that can be used for deploying your hadoop cluster. Here is an
+example of the tarball that will be generated.
+
+* `~/apache/hadoop/hadoop-dist/target/${project.version}.tar.gz`
+
+At this point we have an option to setup a physical cluster or run ozone via
+docker.
+
+Running Ozone via Docker
+------------------------
+
+This assumes that you have a running docker setup on the machine. Please run
+the following commands to see ozone in action.
+
+ Go to the directory where the docker compose files exist.
+
+
+ - `cd hadoop-dist/target/compose/ozone`
+
+Tell docker to start ozone. This will start a KSM, an SCM and a single datanode in
+the background.
+
+
+ - `docker-compose up -d`
+
+Now let us run some workload against ozone; to do that we will run freon.
+
+This will log into the datanode and run bash.
+
+ - `docker-compose exec datanode bash`
+
+Now you can run the `ozone` command shell or freon, the ozone load generator.
+
+This is the command to run freon.
+
+ - `ozone freon -mode offline -validateWrites -numOfVolumes 1 -numOfBuckets 10 -numOfKeys 100`
+
+You can check out the KSM UI to see the request information.
+
+ - `http://localhost:9874/`
+
+If you need more datanodes you can scale up:
+
+ - `docker-compose scale datanode=3`
+
+Running Ozone using a real cluster
+----------------------------------
+
+Please proceed to set up a hadoop cluster by creating the hdfs-site.xml and
+other configuration files that are needed for your cluster.
+
+
+### Ozone Configuration
+
+Ozone relies on its own configuration file called `ozone-site.xml`. It is
+just for convenience and ease of management --  you can add these settings
+to `hdfs-site.xml`, if you don't want to keep ozone settings separate.
+This document refers to `ozone-site.xml` so that ozone settings are in one
+place  and not mingled with HDFS settings.
+
+ * _*ozone.enabled*_  This is the most important setting for ozone.
+ Currently, Ozone is an opt-in subsystem of HDFS. By default, Ozone is
+ disabled. Setting this flag to `true` enables ozone in the HDFS cluster.
+ Here is an example,
+
+```
+    <property>
+       <name>ozone.enabled</name>
+       <value>True</value>
+    </property>
+```
+ *  _*ozone.metadata.dirs*_ Ozone is designed with modern hardware
+ in mind. It tries to use SSDs effectively. So users can specify where the
+ metadata must reside. Usually you pick your fastest disk (SSD if
+ you have them on your nodes). KSM, SCM and datanode will write the metadata
+ to these disks. This is a required setting; if it is missing, Ozone will
+ fail to come up. Here is an example,
+
+```
+   <property>
+      <name>ozone.metadata.dirs</name>
+      <value>/data/disk1/meta</value>
+   </property>
+```
+
+* _*ozone.scm.names*_ Ozone is built on top of the container framework. Storage
+ Container Manager (SCM) is a distributed block service which is used by ozone
+ and other storage services.
+ This property allows datanodes to discover where SCM is, so that
+ datanodes can send heartbeats to SCM. SCM is designed to be highly available
+ and datanodes assume there are multiple instances of SCM which form a highly
+ available ring. The HA feature of SCM is a work in progress. So we
+ configure ozone.scm.names to be a single machine. Here is an example,
+
+```
+    <property>
+      <name>ozone.scm.names</name>
+      <value>scm.hadoop.apache.org</value>
+    </property>
+```
+
+* _*ozone.scm.datanode.id*_ Each datanode that speaks to SCM generates an ID
+just like HDFS.  This is an optional setting. Please note:
+This path will be created by datanodes if it doesn't exist already. Here is an
+ example,
+
+```
+   <property>
+      <name>ozone.scm.datanode.id</name>
+      <value>/data/disk1/scm/meta/node/datanode.id</value>
+   </property>
+```
+
+* _*ozone.scm.block.client.address*_ Storage Container Manager (SCM) offers a
+ set of services that can be used to build a distributed storage system. One
+ of the services offered is the block services. KSM and HDFS would use this
+ service. This property describes where KSM can discover SCM's block service
+ endpoint. There are corresponding port settings, but assuming that we are using
+ default ports, the server address is the only required field. Here is an
+ example,
+
+```
+    <property>
+      <name>ozone.scm.block.client.address</name>
+      <value>scm.hadoop.apache.org</value>
+    </property>
+```
+
+* _*ozone.ksm.address*_ KSM server address. This is used by the Ozone handler and
+Ozone File System.
+
+```
+    <property>
+       <name>ozone.ksm.address</name>
+       <value>ksm.hadoop.apache.org</value>
+    </property>
+```
+
+* _*dfs.datanode.plugins*_ Datanode service plugins: the container manager part
+ of ozone is running inside the datanode as a service plugin. To activate ozone
+ you should define the service plugin implementation class. **Important**
+ It should be added to the **hdfs-site.xml** as the plugin should be activated
+ as part of the normal HDFS Datanode bootstrap.
+
+```
+    <property>
+       <name>dfs.datanode.plugins</name>
+       <value>org.apache.hadoop.ozone.HddsDatanodeService</value>
+    </property>
+```
+
+Here is a quick summary of settings needed by Ozone.
+
+| Setting                        | Value                        | Comment |
+|--------------------------------|------------------------------|------------------------------------------------------------------|
+| ozone.enabled                  | True                         | This enables SCM and  containers in HDFS cluster.                |
+| ozone.metadata.dirs            | file path                    | The metadata will be stored here.                                |
+| ozone.scm.names                | SCM server name              | Hostname:port or IP:port address of SCM.                         |
+| ozone.scm.block.client.address | SCM server name and port     | Used by services like KSM                                        |
+| ozone.scm.client.address       | SCM server name and port     | Used by client side                                              |
+| ozone.scm.datanode.address     | SCM server name and port     | Used by datanode to talk to SCM                                  |
+| ozone.ksm.address              | KSM server name              | Used by Ozone handler and Ozone file system.                     |
+
+ Here is a working example of `ozone-site.xml`.
+
+```
+    <?xml version="1.0" encoding="UTF-8"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <configuration>
+      <property>
+          <name>ozone.enabled</name>
+          <value>True</value>
+        </property>
+
+        <property>
+          <name>ozone.metadata.dirs</name>
+          <value>/data/disk1/ozone/meta</value>
+        </property>
+
+        <property>
+          <name>ozone.scm.names</name>
+          <value>127.0.0.1</value>
+        </property>
+
+        <property>
+           <name>ozone.scm.client.address</name>
+           <value>127.0.0.1:9860</value>
+        </property>
+
+         <property>
+           <name>ozone.scm.block.client.address</name>
+           <value>127.0.0.1:9863</value>
+         </property>
+
+         <property>
+           <name>ozone.scm.datanode.address</name>
+           <value>127.0.0.1:9861</value>
+         </property>
+
+         <property>
+           <name>ozone.ksm.address</name>
+           <value>127.0.0.1:9874</value>
+         </property>
+    </configuration>
+```
+
+And don't forget to enable the datanode component by adding the
+following configuration to hdfs-site.xml:
+
+```
+    <property>
+       <name>dfs.datanode.plugins</name>
+       <value>org.apache.hadoop.ozone.HddsDatanodeService</value>
+    </property>
+```
+
+### Starting Ozone
+
+Ozone is designed to run concurrently with HDFS. The simplest way to [start
+HDFS](../hadoop-common/ClusterSetup.html) is to run
+`$HADOOP/sbin/start-dfs.sh`. Once HDFS
+is running, please verify it is fully functional by running some commands like
+
+   - *./hdfs dfs -mkdir /usr*
+   - *./hdfs dfs -ls /*
+
+ Once you are sure that HDFS is running, start Ozone. To start ozone, you
+ need to start SCM and KSM. Currently we assume that both KSM and SCM
+ are running on the same node; this will change in the future.
+
+ The first time you bring up Ozone, SCM must be initialized.
+
+   - `./ozone scm -init`
+
+ Start SCM.
+
+   - `./ozone --daemon start scm`
+
+ Once SCM gets started, KSM must be initialized.
+
+   - `./ozone ksm -createObjectStore`
+
+ Start KSM.
+
+   - `./ozone --daemon start ksm`
+
+If you would like to start HDFS and Ozone together, you can do that by running
+ a single command.
+ - `$HADOOP/sbin/start-ozone.sh`
+
+ This command will start HDFS and then start the ozone components.
+
+ Once you have ozone running you can use these ozone [shell](./OzoneCommandShell.html)
+ commands to create a volume, bucket and keys.
+
+### Diagnosing issues
+
+Ozone tries not to pollute the existing HDFS streams of configuration and
+logging. So ozone logs are by default configured to be written to a file
+called `ozone.log`. This is controlled by the settings in `log4j.properties`
+file in the hadoop configuration directory.
+
+Here are the log4j properties that are added by ozone.
+
+
+```
+   #
+   # Add a logger for ozone that is separate from the Datanode.
+   #
+   #log4j.debug=true
+   log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
+
+   # Do not log into datanode logs. Remove this line to have single log.
+   log4j.additivity.org.apache.hadoop.ozone=false
+
+   # For development purposes, log both to console and log file.
+   log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
+   log4j.appender.OZONE.Threshold=info
+   log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
+   log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+    %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
+
+   # Real ozone logger that writes to ozone.log
+   log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+   log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
+   log4j.appender.FILE.Threshold=debug
+   log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+   log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+     (%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
+      %m%n
+```
+
+If you would like to have a single datanode log instead of ozone logs
+getting written to ozone.log, please remove the following line or set it to true.
+
+ ` log4j.additivity.org.apache.hadoop.ozone=false`
+
+On the SCM/KSM side, you will be able to see
+
+  - `hadoop-hdfs-ksm-hostname.log`
+  - `hadoop-hdfs-scm-hostname.log`
+
+Please file any issues you see under the related issues:
+
+ - [Object store in HDFS: HDFS-7240](https://issues.apache.org/jira/browse/HDFS-7240)
+ - [Ozone File System: HDFS-13074](https://issues.apache.org/jira/browse/HDFS-13074)
+ - [Building HDFS on top of new storage layer (HDDS): HDFS-10419](https://issues.apache.org/jira/browse/HDFS-10419)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/content/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Metrics.md b/hadoop-ozone/docs/content/Metrics.md
new file mode 100644
index 0000000..dc58460
--- /dev/null
+++ b/hadoop-ozone/docs/content/Metrics.md
@@ -0,0 +1,170 @@
+---
+title: Metrics
+menu: main
+---
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+
+
+HDFS Ozone Metrics
+===============
+
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
+Overview
+--------
+
+The container metrics that are used in HDFS Ozone.
+
+### Storage Container Metrics
+
+The metrics for various storage container operations in HDFS Ozone.
+
+The storage container service is optional and can be enabled by setting
+'ozone.enabled' to true.
+These metrics are only available when ozone is enabled.
+
+Storage Container Metrics maintains a set of generic metrics for all
+container RPC calls that can be made to a datanode/container.
+
+Along with the total number of RPC calls, containers maintain a set of metrics
+for each RPC call. Following is the set of counters maintained for each RPC
+operation.
+
+*Total number of operations* - We maintain an array which counts how
+many times a specific operation has been performed.
+Eg. `NumCreateContainer` tells us how many times create container has been
+invoked on this datanode.
+
+*Total number of pending operations* - This is an array which counts how
+many times a specific operation is waiting to be processed from the client
+point of view.
+Eg. `NumPendingCreateContainer` tells us how many create container requests
+are waiting to be processed.
+
+*Average latency of each pending operation in nanoseconds* - The average latency
+of the operation from the client point of view.
+Eg. `CreateContainerLatencyAvgTime` - This tells us the average latency of
+Create Container from the client point of view.
+
+*Number of bytes involved in a specific command* - This is an array that is
+maintained for all operations, but makes sense only for read and write
+operations.
+
+While it is possible to read the bytes in update container, it really makes
+no sense, since no data stream is involved. Users are advised to use this
+metric only when it makes sense. Eg. `BytesReadChunk` -- Tells us how
+many bytes have been read from this data using Read Chunk operation.
+
+*Average Latency of each operation* - The average latency of the operation.
+Eg. `LatencyCreateContainerAvgTime` - This tells us the average latency of
+Create Container.
+
+*Quantiles for each of these operations* - The 50/75/90/95/99th percentile
+of these operations. Eg. `CreateContainerNanos60s50thPercentileLatency` --
+gives latency of the create container operations at the 50th percentile latency
+(1 minute granularity). We report 50th, 75th, 90th, 95th and 99th percentile
+for all RPCs.
+
+So this leads to the containers reporting these counters for each of these
+RPC operations.
+
+| Name | Description |
+|:---- |:---- |
+| `NumOps` | Total number of container operations |
+| `CreateContainer` | Create container operation |
+| `ReadContainer` | Read container operation |
+| `UpdateContainer` | Update container operations |
+| `DeleteContainer` | Delete container operations |
+| `ListContainer` | List container operations |
+| `PutKey` | Put key operations |
+| `GetKey` | Get key operations |
+| `DeleteKey` | Delete key operations |
+| `ListKey` | List key operations |
+| `ReadChunk` | Read chunk operations |
+| `DeleteChunk` | Delete chunk operations |
+| `WriteChunk` | Write chunk operations|
+| `ListChunk` | List chunk operations |
+| `CompactChunk` | Compact chunk operations |
+| `PutSmallFile` | Put small file operations |
+| `GetSmallFile` | Get small file operations |
+| `CloseContainer` | Close container operations |
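+
+These counters surface through the standard Hadoop metrics system, so one way
+to look at them is the daemon's `/jmx` servlet. Below is a minimal Java sketch
+(the host and HTTP port are hypothetical; the query simply dumps every Hadoop
+metrics bean as JSON):
+
+```java
+import java.io.InputStream;
+import java.net.URL;
+import java.util.Scanner;
+
+public class JmxScrape {
+  public static void main(String[] args) throws Exception {
+    // Standard Hadoop JMX JSON servlet; filter to Hadoop metrics beans.
+    URL jmx = new URL("http://datanode.example.com:9864/jmx?qry=Hadoop:*");
+    try (InputStream in = jmx.openStream();
+         Scanner json = new Scanner(in).useDelimiter("\\A")) {
+      System.out.println(json.hasNext() ? json.next() : "");
+    }
+  }
+}
+```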
+
+### Storage Container Manager Metrics
+
+The metrics for containers that are managed by the Storage Container Manager.
+
+Storage Container Manager (SCM) is a master service which keeps track of
+replicas of storage containers. It also manages all data nodes and their
+states, dealing with container reports and dispatching commands for execution.
+
+Following are the counters for containers:
+
+| Name | Description |
+|:---- |:---- |
+| `LastContainerReportSize` | Total size in bytes of all containers in the latest container report that SCM received from a datanode |
+| `LastContainerReportUsed` | Total number of bytes used by all containers in the latest container report that SCM received from a datanode |
+| `LastContainerReportKeyCount` | Total number of keys in all containers in the latest container report that SCM received from a datanode |
+| `LastContainerReportReadBytes` | Total number of bytes that have been read from all containers in the latest container report that SCM received from a datanode |
+| `LastContainerReportWriteBytes` | Total number of bytes that have been written into all containers in the latest container report that SCM received from a datanode |
+| `LastContainerReportReadCount` | Total number of times containers have been read from in the latest container report that SCM received from a datanode |
+| `LastContainerReportWriteCount` | Total number of times containers have been written to in the latest container report that SCM received from a datanode |
+| `ContainerReportSize` | Total size in bytes of all containers over the whole cluster |
+| `ContainerReportUsed` | Total number of bytes used by all containers over the whole cluster |
+| `ContainerReportKeyCount` | Total number of keys in all containers over the whole cluster |
+| `ContainerReportReadBytes` | Total number of bytes that have been read from all containers over the whole cluster |
+| `ContainerReportWriteBytes` | Total number of bytes that have been written into all containers over the whole cluster |
+| `ContainerReportReadCount` | Total number of times containers have been read from over the whole cluster |
+| `ContainerReportWriteCount` | Total number of times containers have been written to over the whole cluster |
+
+### Key Space Metrics
+
+The metrics for various key space manager operations in HDFS Ozone.
+
+Key Space Manager (KSM) is a service similar to the Namenode in HDFS.
+In the current design of KSM, it maintains metadata of all volumes, buckets and keys.
+These metrics are only available when ozone is enabled.
+
+Following is the set of counters maintained for each key space operation.
+
+*Total number of operations* - We maintain an array which counts how
+many times a specific operation has been performed.
+Eg. `NumVolumeCreate` tells us how many times create volume has been
+invoked in KSM.
+
+*Total number of failed operations* - This counter is the opposite of the one
+above.
+Eg. `NumVolumeCreateFails` tells us how many create volume invocations have
+failed in KSM.
+
+Following are the counters for each of the key space operations.
+
+| Name | Description |
+|:---- |:---- |
+| `VolumeCreate` | Create volume operation |
+| `VolumeUpdates` | Update volume property operation |
+| `VolumeInfos` | Get volume information operation |
+| `VolumeCheckAccesses` | Check volume access operation |
+| `VolumeDeletes` | Delete volume operation |
+| `VolumeLists` | List volume operation |
+| `BucketCreates` | Create bucket operation |
+| `BucketInfos` | Get bucket information operation |
+| `BucketUpdates` | Update bucket property operation |
+| `BucketDeletes` | Delete bucket operation |
+| `BucketLists` | List bucket operation |
+| `KeyAllocate` | Allocate key operation |
+| `KeyLookup` | Look up key operation |
+| `KeyDeletes` | Delete key operation |
+| `KeyLists` | List key operation |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/content/Rest.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Rest.md b/hadoop-ozone/docs/content/Rest.md
new file mode 100644
index 0000000..2e935d6
--- /dev/null
+++ b/hadoop-ozone/docs/content/Rest.md
@@ -0,0 +1,553 @@
+---
+title: Ozone REST API
+menu: main
+---
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Ozone REST APIs
+===================
+
+<!-- MACRO{toc|fromDepth=0|toDepth=1} -->
+
+Overview
+--------
+
+The Ozone REST APIs allow users to access ozone via the REST protocol.
+
+Authentication and Authorization
+--------------------
+
+For the time being, the default authentication mode of the REST API is insecure
+access mode, which is *Simple* mode. Under this mode, the ozone server trusts the
+user name specified by the client and does not perform any authentication.
+
+The user name can be specified in an HTTP header by
+
+* `x-ozone-user: {USER_NAME}`
+
+For example, if you add the following header *x-ozone-user: bilbo* to the HTTP
+request, the operation will be executed as user *bilbo*.
+In *Simple* mode, there is no real authorization either. A client can be
+authorized to obtain administrator privilege by using the HTTP header
+
+* `Authorization: {AUTH_METHOD} {SIGNATURE}`
+
+For example, if you set the header *Authorization: OZONE root* in the HTTP
+request, ozone will authorize the client with administrator privilege.
+
+Common REST Headers
+--------------------
+
+The following HTTP headers must be set for each REST call.
+
+| Property | Description |
+|:---- |:----
+| Authorization | The authorization field determines which authentication method is used by ozone. Currently only *simple* mode is supported; the corresponding value is *OZONE*. Optionally a user name can be set as *OZONE {USER_NAME}* to authorize as a particular user. |
+| Date | Standard HTTP header that represents dates. The format is - day of the week, month, day, year and time (military time format) in GMT. Any other time zone will be rejected by ozone server. Eg. *Date : Mon, Apr 4, 2016 06:22:00 GMT*. This field is required. |
+| x-ozone-version | A required HTTP header to indicate which version of API this call will be communicating to. E.g *x-ozone-version: v1*. Currently ozone only publishes v1 version API. |
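+
+For programmatic access, the same headers can be set from plain Java. Below is
+a minimal, hypothetical sketch (host, port, user and volume name are made up)
+that requests volume info as user *bilbo*:
+
+    import java.net.HttpURLConnection;
+    import java.net.URL;
+
+    public class OzoneRestSketch {
+      public static void main(String[] args) throws Exception {
+        URL url = new URL("http://localhost:9880/volume-of-bilbo?info=volume");
+        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+        conn.setRequestMethod("GET");
+        // Common REST headers from the table above.
+        conn.setRequestProperty("Authorization", "OZONE");
+        conn.setRequestProperty("Date", "Mon, 26 Jun 2017 04:23:30 GMT");
+        conn.setRequestProperty("x-ozone-version", "v1");
+        conn.setRequestProperty("x-ozone-user", "bilbo");
+        System.out.println("HTTP " + conn.getResponseCode());
+      }
+    }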
+
+Common Reply Headers
+--------------------
+
+The common reply headers are part of all Ozone server replies.
+
+| Property | Description |
+|:---- |:----
+| Date | This is the HTTP date header and it is set to server’s local time expressed in GMT. |
+| x-ozone-request-id | This is a UUID string that represents an unique request ID. This ID is used to track the request through the ozone system and is useful for debugging purposes. |
+| x-ozone-server-name | Fully qualified domain name of the server which handled the request. |
+
+Volume APIs
+--------------------
+
+### Create a Volume
+
+This API allows admins to create a new storage volume.
+
+Schema:
+
+- `POST /{volume}?quota=<VOLUME_QUOTA>`
+
+Query Parameter:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| quota | long<BYTES \| MB \| GB \| TB> | Optional. Quota size in BYTEs, MBs, GBs or TBs |
+
+Sample HTTP POST request:
+
+    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" "http://localhost:9880/volume-to-create"
+
+This request creates a volume as user *bilbo*; the authorization field is set to *OZONE root* because this call requires administration privilege. The client receives a response with zero content length.
+
+    HTTP/1.1 201 Created
+    x-ozone-server-name: localhost
+    x-ozone-request-id: 2173deb5-bbb7-4f0a-8236-f354784e3bae
+    Date: Tue, 27 Jun 2017 07:42:04 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 0
+    Connection: keep-alive
+
+### Update Volume
+
+This API allows administrators to update volume info such as ownership and quota. This API requires administration privilege.
+
+Schema:
+
+- `PUT /{volume}?quota=<VOLUME_QUOTA>`
+
+Query Parameter:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| quota | long<BYTES \| MB \| GB \| TB>  \| remove | Optional. Quota size in BYTEs, MBs, GBs or TBs. Or use string value *remove* to remove an existing quota for a volume. |
+
+Sample HTTP PUT request:
+
+    curl -X PUT -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: john"  http://localhost:9880/volume-to-update
+
+This request modifies the owner of */volume-to-update* to *john*.
+
+### Delete Volume
+
+This API allows a user to delete a volume they own if the volume is empty. Administrators can delete volumes owned by any user.
+
+Schema:
+
+- `DELETE /{volume}`
+
+Sample HTTP DELETE request:
+
+    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: bilbo"  http://localhost:9880/volume-to-delete
+
+This request deletes the empty volume */volume-to-delete*. The client receives a zero-length content response.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: 6af14c64-e3a9-40fe-9634-df60b7cbbc6a
+    Date: Tue, 27 Jun 2017 08:49:52 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 0
+    Connection: keep-alive
+
+### Info Volume
+
+This API allows a user to read the info of a volume they own. Administrators can read the volume info of any user.
+
+Schema:
+
+- `GET /{volume}?info=volume`
+
+Query Parameter:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| info | "volume" | Required and enforced with this value. |
+
+Sample HTTP GET request:
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo?info=volume"
+
+This request gets the info of volume */volume-of-bilbo*; the client receives a response with a JSON object of volume info.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: a2224806-beaf-42dd-a68e-533cd7508f74
+    Date: Tue, 27 Jun 2017 07:55:35 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 171
+    Connection: keep-alive
+
+    {
+      "owner" : { "name" : "bilbo" },
+      "quota" : { "unit" : "TB", "size" : 1048576 },
+      "volumeName" : "volume-of-bilbo",
+      "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
+      "createdBy" : "root"
+    }
+
+### List Volumes
+
+This API allows a user to list all volumes they own. Administrators can list all volumes owned by any user.
+
+Schema:
+
+- `GET /?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_VOLUME_KEY>`
+
+Query Parameter:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| prefix | string | Optional. Only volumes with this prefix are included in the result. |
+| max-keys | int | Optional. Maximum number of volumes included in the result. Default is 1024 if not specified. |
+| prev-key | string | Optional. Volume name from where listing should start, this key is excluded in the result. It must be a valid volume name. |
+| root-scan | bool | Optional. List all volumes in the cluster if this is set to true. Default false. |
+
+Sample HTTP GET request:
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/?max-keys=100&prefix=Jan"
+
+This request gets all volumes owned by *bilbo* whose names contain the prefix *Jan*; the result contains at most *100* entries. The client receives a list of JSON objects, each of which describes the info of a volume.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: 7fa0dce1-a8bd-4387-bc3c-1dac4b710bb1
+    Date: Tue, 27 Jun 2017 08:07:04 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 602
+    Connection: keep-alive
+
+    {
+      "volumes" : [
+        {
+          "owner" : { "name" : "bilbo"},
+          "quota" : { "unit" : "TB", "size" : 2 },
+          "volumeName" : "Jan-vol1",
+          "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
+          "createdBy" : root
+      },
+      ...
+      ]
+    }
+
+Bucket APIs
+--------------------
+
+### Create Bucket
+
+This API allows a user to create a bucket in a volume.
+
+Schema:
+
+- `POST /{volume}/{bucket}`
+
+Additional HTTP Headers:
+
+| HTTP Header | Value | Description |
+|:---- |:---- |:----
+| x-ozone-acl | ozone ACLs | Optional. Ozone acls. |
+| x-ozone-storage-class | <DEFAULT \| ARCHIVE \| DISK \| RAM_DISK \| SSD > | Optional. Storage type for a volume. |
+| x-ozone-bucket-versioning | enabled/disabled | Optional. Whether to enable bucket versioning or not. |
+
+Sample HTTP POST request:
+
+    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" http://localhost:9880/volume-of-bilbo/bucket-0
+
+This request creates a bucket *bucket-0* under volume *volume-of-bilbo*.
+
+    HTTP/1.1 201 Created
+    x-ozone-server-name: localhost
+    x-ozone-request-id: 49acfeec-4c85-470a-872b-2eaebd8d751e
+    Date: Tue, 27 Jun 2017 08:55:25 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 0
+    Connection: keep-alive
+
+### Update Bucket
+
+Updates bucket meta-data, like ACLs.
+
+Schema:
+
+- `PUT /{volume}/{bucket}`
+
+Additional HTTP Headers:
+
+| HTTP Header | Value | Description |
+|:---- |:---- |:----
+| x-ozone-acl | ozone ACLs | Optional. Ozone acls. |
+| x-ozone-bucket-versioning | enabled/disabled | Optional. Whether to enable bucket versioning or not. |
+
+Sample HTTP PUT request:
+
+    curl -i -X PUT -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" -H "x-ozone-acl: ADD user:peregrin:rw" http://localhost:9880/volume-of-bilbo/bucket-to-update
+
+This request adds the ACL policy specified by HTTP header *x-ozone-acl* to bucket */volume-of-bilbo/bucket-to-update*; the ACL field *ADD user:peregrin:rw* grants additional read/write permission on this bucket to user *peregrin*.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: b061a295-5faf-4b98-94b9-8b3e87c8eb5e
+    Date: Tue, 27 Jun 2017 09:02:37 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 0
+    Connection: keep-alive
+
+### Delete Bucket
+
+Deletes a bucket if it is empty. A user can only delete buckets they own, while administrators can delete buckets owned by any user, as long as they are empty.
+
+Schema:
+
+- `DELETE /{volume}/{bucket}`
+
+Sample HTTP DELETE request:
+
+    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0"
+
+This request deletes bucket */volume-of-bilbo/bucket-0*. The client receives a zero-length content response.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: f57acd7a-2116-4c2f-aa2f-5a483db81c9c
+    Date: Tue, 27 Jun 2017 09:16:52 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 0
+    Connection: keep-alive
+
+
+### Info Bucket
+
+This API returns information about a given bucket.
+
+Schema:
+
+- `GET /{volume}/{bucket}?info=bucket`
+
+Query Parameters:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| info | "bucket" | Required and enforced with this value. |
+
+Sample HTTP GET request:
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0?info=bucket"
+
+This request gets the info of bucket */volume-of-bilbo/bucket-0*. The client receives a JSON object containing the bucket info.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: f125485b-8cae-4c7f-a2d6-5b1fefd6f193
+    Date: Tue, 27 Jun 2017 09:08:31 GMT
+    Content-Type: application/json
+    Content-Length: 138
+    Connection: keep-alive
+
+    {
+      "volumeName" : "volume-of-bilbo",
+      "bucketName" : "bucket-0",
+      "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
+      "acls" : [ ],
+      "versioning" : "DISABLED",
+      "storageType" : "DISK"
+    }
+
+### List Buckets
+
+List buckets in a given volume.
+
+Schema:
+
+- `GET /{volume}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_BUCKET_KEY>`
+
+Query Parameters:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| prefix | string | Optional. Only buckets with this prefix are included in the result. |
+| max-keys | int | Optional. Maximum number of buckets included in the result. Default is 1024 if not specified. |
+| prev-key | string | Optional. Bucket name from which the listing should start; this bucket is excluded from the result. It must be a valid bucket name. |
+
+Sample HTTP GET request:
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo?max-keys=10"
+
+This request lists the buckets under volume *volume-of-bilbo*; the result contains at most 10 entries. The client receives an array of JSON objects, each of which represents the info of a bucket.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: e048c3d5-169c-470f-9903-632d9f9e32d5
+    Date: Tue, 27 Jun 2017 09:12:18 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 207
+    Connection: keep-alive
+
+    {
+      "buckets" : [ {
+        "volumeName" : "volume-of-bilbo",
+        "bucketName" : "bucket-0",
+        "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
+        "acls" : [ ],
+        "versioning" : null,
+        "storageType" : "DISK",
+        "bytesUsed" : 0,
+        "keyCount" : 0
+        },
+        ...
+      ]
+    }
+
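+To page through a long listing, pass the name of the last bucket returned as
+*prev-key* in the next request. A sketch of such a follow-up request (assuming
+the previous page ended at *bucket-0*):
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo?max-keys=10&prev-key=bucket-0"
+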
+Key APIs
+------------------
+
+### Put Key
+
+This API allows users to create or overwrite keys inside a bucket.
+
+Schema:
+
+- `PUT /{volume}/{bucket}/{key}`
+
+Additional HTTP headers:
+
+| HTTP Header | Value | Description |
+|:---- |:---- |:----
+| Content-MD5 | MD5 digest | Standard HTTP header; MD5 hash of the uploaded content. |
+
+Sample HTTP PUT request:
+
+    curl -X PUT -T /path/to/localfile -H "Authorization:OZONE" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
+
+This request uploads the local file */path/to/localfile* specified by the *-T* option to Ozone as user *bilbo*, mapped to the Ozone key */volume-of-bilbo/bucket-0/file-0*. The client receives a zero length content response.
+
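+The *Content-MD5* header carries the standard HTTP digest of the uploaded
+content. A sketch of computing it locally before the upload (assuming *openssl*
+is available; the standard Content-MD5 value is the base64-encoded binary
+digest):
+
+    MD5=$(openssl dgst -md5 -binary /path/to/localfile | base64)
+    curl -X PUT -T /path/to/localfile -H "Content-MD5: $MD5" -H "Authorization:OZONE" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
+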
+### Get Key
+
+This API allows users to get or download a key from an Ozone bucket.
+
+Schema:
+
+- `GET /{volume}/{bucket}/{key}`
+
+Sample HTTP GET request:
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
+
+This request reads the content of key */volume-of-bilbo/bucket-0/file-0*. If the content of the file is plain text, it can be directly dumped onto stdout.
+
+    HTTP/1.1 200 OK
+    Content-Type: application/octet-stream
+    x-ozone-server-name: localhost
+    x-ozone-request-id: 1bcd7de7-d8e3-46bb-afee-bdc933d383b8
+    Date: Tue, 27 Jun 2017 09:35:29 GMT
+    Content-Length: 12
+    Connection: keep-alive
+
+    Hello Ozone!
+
+If the file is not plain text, specify the *-O* option in the curl command and the file will be downloaded into the current working directory, named after the key. A sample request looks like the following:
+
+    curl -O -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-1"
+
+The response looks like the following:
+
+    % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
+                                 Dload  Upload   Total   Spent    Left  Speed
+    100 6148k  100 6148k    0     0  24.0M      0 --:--:-- --:--:-- --:--:-- 24.1M
+
+### Delete Key
+
+This API allows users to delete a key from a bucket.
+
+Schema:
+
+- `DELETE /{volume}/{bucket}/{key}`
+
+Sample HTTP DELETE request:
+
+    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0"
+
+This request deletes key */volume-of-bilbo/bucket-0/file-0*. The client receives a zero length content result:
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: f8c4a373-dd5f-4e3a-b6c4-ddf7e191fe91
+    Date: Tue, 27 Jun 2017 14:19:48 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 0
+    Connection: keep-alive
+
+### Info Key
+
+This API returns information about a given key.
+
+Schema:
+
+- `GET /{volume}/{bucket}/{key}?info=key`
+
+Query Parameter:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| info | "key" | Required; must be set to this value. |
+
+Sample HTTP GET request:
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/file-0?info=key"
+
+This request returns information about the key */volume-of-bilbo/bucket-0/file-0*. The client receives a JSON object listing the attributes of the key.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: c674343c-a0f2-49e4-bbd6-daa73e7dc131
+    Date: Mon, 03 Jul 2017 14:28:45 GMT
+    Content-Type: application/octet-stream
+    Content-Length: 73
+    Connection: keep-alive
+
+    {
+      "version" : 0,
+      "md5hash" : null,
+      "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
+      "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
+      "size" : 0,
+      "keyName" : "file-0"
+    }
+
+### List Keys
+
+This API allows users to list keys in a bucket.
+
+Schema:
+
+- `GET /{volume}/{bucket}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_KEY>`
+
+Query Parameters:
+
+| Query Parameter | Value | Description |
+|:---- |:---- |:----
+| prefix | string | Optional. Only keys with this prefix are included in the result. |
+| max-keys | int | Optional. Maximum number of keys included in the result. Default is 1024 if not specified. |
+| prev-key | string | Optional. Key name from which the listing should start; this key is excluded from the result. It must be a valid key name. |
+
+Sample HTTP GET request:
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/?max-keys=100&prefix=file"
+
+This request lists the keys under bucket */volume-of-bilbo/bucket-0*; the listing result is filtered by the prefix *file*. The client receives an array of JSON objects, each of which represents the info of a matched key.
+
+    HTTP/1.1 200 OK
+    x-ozone-server-name: localhost
+    x-ozone-request-id: 7f9fc970-9904-4c56-b671-83a086c6f555
+    Date: Tue, 27 Jun 2017 09:48:59 GMT
+    Content-Type: application/json
+    Content-Length: 209
+    Connection: keep-alive
+
+    {
+      "name" : null,
+      "prefix" : file,
+      "maxKeys" : 0,
+      "truncated" : false,
+      "keyList" : [ {
+          "version" : 0,
+          "md5hash" : null,
+          "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
+          "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
+          "size" : 0,
+          "keyName" : "file-0"
+          },
+          ...
+       ]
+    }
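+
+As with bucket listing, a truncated result can be continued by passing the last
+returned key name as *prev-key*. An illustrative follow-up request (assuming
+the previous page ended at key *file-0*):
+
+    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" "http://localhost:9880/volume-of-bilbo/bucket-0/?max-keys=100&prefix=file&prev-key=file-0"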

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/content/_index.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/_index.md b/hadoop-ozone/docs/content/_index.md
new file mode 100644
index 0000000..ab7eabe
--- /dev/null
+++ b/hadoop-ozone/docs/content/_index.md
@@ -0,0 +1,102 @@
+---
+title: Ozone Overview
+menu: main
+weight: -10
+---
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+
+Ozone is an Object store for Apache Hadoop. It aims to scale to billions of
+keys. The following is a high-level overview of the core components of Ozone.
+
+![Ozone Architecture Overview](./OzoneOverview.png)
+
+The main elements of Ozone are:
+
+## Clients
+
+Ozone ships with a set of ready-made clients: the Ozone CLI and Freon.
+
+ * [Ozone CLI](./OzoneCommandShell.html) is the command line interface, similar to the 'hdfs' command.
+
+ * Freon is a load generation tool for Ozone.
+
+## REST Handler
+
+Ozone provides both an RPC (Remote Procedure Call) and a REST
+(Representational State Transfer) style interface. This allows clients to be
+written in many languages quickly. Ozone strives to maintain a similar
+interface between REST and RPC. The REST handler offers the REST protocol
+services of Ozone.
+
+For most purposes, a client can make a one-line change to switch from REST to
+RPC or vice versa.
+
+## Ozone File System
+
+Ozone file system (TODO: Add documentation) is a Hadoop compatible file system.
+This is an important user-visible component of Ozone.
+This allows Hadoop services and applications like Hive/Spark to run against
+Ozone without any change.
+
+## Ozone Client
+
+This is like DFSClient in HDFS. It acts as the standard client to talk to
+Ozone. All other components that we have discussed so far rely on the Ozone
+client (TODO: Add Ozone client documentation).
+
+## Key Space Manager
+
+Key Space Manager (KSM) takes care of Ozone's namespace.
+All Ozone entities like volumes, buckets and keys are managed by KSM
+(TODO: Add KSM documentation). In short, KSM is the metadata manager for Ozone.
+KSM talks to the block manager (SCM) to get blocks and passes them on to the
+Ozone client. The Ozone client writes data to these blocks.
+KSM will eventually be replicated via Apache Ratis for High Availability.
+
+## Storage Container Manager
+
+Storage Container Manager (SCM) is the block and cluster manager for Ozone.
+SCM, along with the datanodes, offers a service called 'containers'.
+A container is a group of unrelated blocks that are managed together
+as a single entity.
+
+SCM offers the following abstractions.
+
+![SCM Abstractions](../SCMBlockDiagram.png)
+
+### Blocks
+
+Blocks are like blocks in HDFS. They are a replicated store of data.
+
+### Containers
+
+A collection of blocks replicated and managed together.
+
+### Pipelines
+
+SCM allows each container to choose its method of replication.
+For example, a container might decide that it needs only one copy of a block
+and might choose a stand-alone pipeline. Another container might want to have
+a very high level of reliability and pick a RATIS-based pipeline. In other
+words, SCM allows different kinds of replication strategies to co-exist.
+
+### Pools
+
+A group of datanodes is called a pool. For scaling purposes,
+we define a pool as a set of machines. This makes management of datanodes
+easier.
+
+### Nodes
+
+The datanode is where data is stored.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/dev-support/bin/generate-site.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/dev-support/bin/generate-site.sh b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
new file mode 100755
index 0000000..3323935
--- /dev/null
+++ b/hadoop-ozone/docs/dev-support/bin/generate-site.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+DOCDIR="$DIR/../.."
+
+if [ ! "$(which hugo)" ]; then
+   echo "Hugo is not yet installed. Doc generation is skipped."
+   exit -1
+fi
+
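+# Render the site into target/classes so the generated documentation is
+# packaged into the hadoop-ozone-docs jar under webapps/docs.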
+DESTDIR="$DOCDIR/target/classes/webapps/docs"
+mkdir -p "$DESTDIR"
+cd "$DOCDIR"
+hugo -d "$DESTDIR" "$@"
+cd -

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/pom.xml b/hadoop-ozone/docs/pom.xml
new file mode 100644
index 0000000..e0f9a87
--- /dev/null
+++ b/hadoop-ozone/docs/pom.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-ozone</artifactId>
+    <version>0.2.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-ozone-docs</artifactId>
+  <version>0.2.1-SNAPSHOT</version>
+  <description>Apache Hadoop Ozone Documentation</description>
+  <name>Apache Hadoop Ozone Documentation</name>
+  <packaging>jar</packaging>
+
+  <dependencies>
+
+  </dependencies>
+  <build>
+    <plugins>
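+      <!-- Run the Hugo-based site generator (dev-support/bin/generate-site.sh)
+           at compile time so the rendered docs land on the classpath. -->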
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.6.0</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <phase>compile</phase>
+          </execution>
+        </executions>
+        <configuration>
+          <executable>dev-support/bin/generate-site.sh</executable>
+          <arguments>
+            <argument>-b</argument>
+            <argument>/docs</argument>
+          </arguments>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/static/OzoneOverview.png
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/static/OzoneOverview.png b/hadoop-ozone/docs/static/OzoneOverview.png
new file mode 100644
index 0000000..7e011d5
Binary files /dev/null and b/hadoop-ozone/docs/static/OzoneOverview.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/static/OzoneOverview.svg
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/static/OzoneOverview.svg b/hadoop-ozone/docs/static/OzoneOverview.svg
new file mode 100644
index 0000000..2e14d3f
--- /dev/null
+++ b/hadoop-ozone/docs/static/OzoneOverview.svg
@@ -0,0 +1,225 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg width="703px" height="465px" viewBox="0 0 703 465" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+    <!-- Generator: Sketch 44.1 (41455) - http://www.bohemiancoding.com/sketch -->
+    <title>Desktop HD</title>
+    <desc>Created with Sketch.</desc>
+    <defs>
+        <rect id="path-1" x="0" y="0" width="131" height="36" rx="8"></rect>
+        <rect id="path-2" x="0" y="0" width="131" height="36" rx="8"></rect>
+        <rect id="path-3" x="9" y="304" width="437" height="144"></rect>
+        <mask id="mask-4" maskContentUnits="userSpaceOnUse" maskUnits="objectBoundingBox" x="0" y="0" width="437" height="144" fill="white">
+            <use xlink:href="#path-3"></use>
+        </mask>
+        <rect id="path-5" x="0" y="0" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-6" x="3.26727637" y="4.49286685" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-7" x="7.07909881" y="11.2321671" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-8" x="0" y="0" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-9" x="3.26727637" y="4.49286685" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-10" x="7.07909881" y="11.2321671" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-11" x="0" y="0" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-12" x="3.26727637" y="4.49286685" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-13" x="7.07909881" y="11.2321671" width="123.06741" height="40.4358016" rx="8"></rect>
+        <rect id="path-14" x="0" y="14.7446809" width="97" height="36.1914894"></rect>
+        <rect id="path-15" x="0" y="14.7446809" width="97" height="36.1914894"></rect>
+        <rect id="path-16" x="0" y="0" width="131" height="36" rx="8"></rect>
+        <rect id="path-17" x="0" y="0" width="131" height="36" rx="8"></rect>
+        <rect id="path-18" x="0.140758874" y="0" width="142.859241" height="35.1071084" rx="8"></rect>
+        <rect id="path-19" x="0" y="0" width="226" height="36" rx="8"></rect>
+        <rect id="path-20" x="6" y="4" width="226" height="36" rx="8"></rect>
+        <rect id="path-21" x="13" y="10" width="226" height="36" rx="8"></rect>
+        <rect id="path-22" x="0" y="0" width="226" height="36" rx="8"></rect>
+        <rect id="path-23" x="6" y="4" width="226" height="36" rx="8"></rect>
+        <rect id="path-24" x="13" y="10" width="226" height="36" rx="8"></rect>
+        <rect id="path-25" x="0" y="14.7446809" width="97" height="36.1914894"></rect>
+    </defs>
+    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+        <path d="M84.5,51.5 L240.5,130.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+        <path id="Line-decoration-1" d="M240.5,130.5 L232.220366,122.944362 L229.50967,128.29713 L240.5,130.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+        <path d="M142.5,150.5 L177.5,150.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+        <path id="Line-decoration-1" d="M177.5,150.5 L166.7,147.5 L166.7,153.5 L177.5,150.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+        <g id="Desktop-HD">
+            <g id="Client" transform="translate(176.000000, 132.000000)">
+                <g id="Rectangle">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-1"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
+                </g>
+                <text id="Ozone-Client" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="24.2107393" y="24">Ozone Client</tspan>
+                </text>
+            </g>
+            <g id="Handler" transform="translate(35.000000, 18.000000)">
+                <g id="Rectangle-2">
+                    <use fill="#E4D6F8" fill-rule="evenodd" xlink:href="#path-2"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
+                </g>
+                <text id="Rest-Handler" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="22.3208008" y="22">Rest Handler</tspan>
+                </text>
+            </g>
+            <use id="Rectangle-4" stroke="#979797" mask="url(#mask-4)" stroke-width="2" fill="#FFFFFF" stroke-dasharray="1,3,1,3" xlink:href="#path-3"></use>
+            <g id="Ratis" transform="translate(315.000000, 378.000000)">
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-5"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-6"></use>
+                    <rect stroke="#000000" stroke-width="1" x="3.76727637" y="4.99286685" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-7"></use>
+                    <rect stroke="#000000" stroke-width="1" x="7.57909881" y="11.7321671" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="51.3828125" y="35.9642655">Ratis</tspan>
+                </text>
+            </g>
+            <g id="Ratis" transform="translate(166.000000, 378.000000)">
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-8"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-9"></use>
+                    <rect stroke="#000000" stroke-width="1" x="3.76727637" y="4.99286685" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-10"></use>
+                    <rect stroke="#000000" stroke-width="1" x="7.57909881" y="11.7321671" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="51.3828125" y="35.9642655">Ratis</tspan>
+                </text>
+            </g>
+            <g id="Ratis" transform="translate(10.000000, 378.000000)">
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-11"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-12"></use>
+                    <rect stroke="#000000" stroke-width="1" x="3.76727637" y="4.99286685" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-13"></use>
+                    <rect stroke="#000000" stroke-width="1" x="7.57909881" y="11.7321671" width="122.06741" height="39.4358016" rx="8"></rect>
+                </g>
+                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="51.3828125" y="35.9642655">Ratis</tspan>
+                </text>
+            </g>
+            <path d="M240.5,168.5 L240.5,311.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M240.5,311.5 L243.5,300.7 L237.5,300.7 L240.5,311.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <path d="M243.5,54.5 L243.5,131.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M243.5,131.5 L246.5,120.7 L240.5,120.7 L243.5,131.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <g id="Container" transform="translate(328.000000, 313.000000)">
+                <g id="Rectangle-5">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-14"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="15.2446809" width="96" height="35.1914894"></rect>
+                </g>
+                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="11.393617" rx="48.5" ry="11.393617"></ellipse>
+                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="51.606383" rx="48.5" ry="11.393617"></ellipse>
+                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="14.3310547" y="37">Container</tspan>
+                </text>
+            </g>
+            <g id="Container" transform="translate(176.000000, 312.000000)">
+                <g id="Rectangle-5">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-15"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="15.2446809" width="96" height="35.1914894"></rect>
+                </g>
+                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="11.393617" rx="48.5" ry="11.393617"></ellipse>
+                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="51.606383" rx="48.5" ry="11.393617"></ellipse>
+                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="14.3310547" y="37">Container</tspan>
+                </text>
+            </g>
+            <g id="FileSystem" transform="translate(11.000000, 133.000000)">
+                <g id="Rectangle-2">
+                    <use fill="#7ED321" fill-rule="evenodd" xlink:href="#path-16"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
+                </g>
+                <text id="Ozone-File-System" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="3.25878906" y="22">Ozone File System</tspan>
+                </text>
+            </g>
+            <g id="CLI" transform="translate(179.000000, 18.000000)">
+                <g id="Rectangle-2">
+                    <use fill="#E4D6F8" fill-rule="evenodd" xlink:href="#path-17"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="130" height="35" rx="8"></rect>
+                </g>
+                <text id="Ozone-CLI" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="26.3896484" y="23">Ozone CLI</tspan>
+                </text>
+            </g>
+            <path d="M333.336323,48.7787611 L248.494492,130.227891" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M248.494492,130.227891 L258.363039,124.91265 L254.207822,120.584351 L248.494492,130.227891 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <g id="Corona" transform="translate(325.000000, 17.000000)">
+                <g id="Rectangle-2">
+                    <use fill="#E4D6F8" fill-rule="evenodd" xlink:href="#path-18"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.640758874" y="0.5" width="141.859241" height="34.1071084" rx="8"></rect>
+                </g>
+                <text id="Freon" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="50.7326544" y="22.8128606">Freon</tspan>
+                </text>
+            </g>
+            <path d="M307.5,148.5 L433.5,148.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M433.5,148.5 L422.7,145.5 L422.7,151.5 L433.5,148.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <path d="M4,232 L699,232" id="Line" stroke="#000000" stroke-width="2" stroke-linecap="square" stroke-dasharray="5,2,5"></path>
+            <g id="KSM" transform="translate(432.000000, 132.000000)">
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-19"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="225" height="35" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-20"></use>
+                    <rect stroke="#000000" stroke-width="1" x="6.5" y="4.5" width="225" height="35" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-21"></use>
+                    <rect stroke="#000000" stroke-width="1" x="13.5" y="10.5" width="225" height="35" rx="8"></rect>
+                </g>
+                <text id="Ozone-Manager" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="67.3793945" y="32">Ozone Manager</tspan>
+                </text>
+            </g>
+            <g id="SCM" transform="translate(450.000000, 281.000000)">
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-22"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="225" height="35" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-23"></use>
+                    <rect stroke="#000000" stroke-width="1" x="6.5" y="4.5" width="225" height="35" rx="8"></rect>
+                </g>
+                <g id="Rectangle-3">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-24"></use>
+                    <rect stroke="#000000" stroke-width="1" x="13.5" y="10.5" width="225" height="35" rx="8"></rect>
+                </g>
+                <text id="Storage-Container-Manager" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="28.0932617" y="32">Storage Container Manager</tspan>
+                </text>
+            </g>
+            <path d="M534.5,178.5 L534.5,283.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <path id="Line-decoration-1" d="M534.5,283.5 L537.5,272.7 L531.5,272.7 L534.5,283.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
+            <text id="Datanodes" font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                <tspan x="196.213867" y="462">Datanodes</tspan>
+            </text>
+            <g id="Container" transform="translate(15.000000, 311.000000)">
+                <g id="Rectangle-5">
+                    <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-25"></use>
+                    <rect stroke="#000000" stroke-width="1" x="0.5" y="15.2446809" width="96" height="35.1914894"></rect>
+                </g>
+                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="11.393617" rx="48.5" ry="11.393617"></ellipse>
+                <ellipse id="Oval" stroke="#000000" fill="#C6D4F9" cx="48.5" cy="51.606383" rx="48.5" ry="11.393617"></ellipse>
+                <text font-family="Helvetica-Bold, Helvetica" font-size="14" font-weight="bold" fill="#000000">
+                    <tspan x="14.3310547" y="37">Container</tspan>
+                </text>
+            </g>
+            <path d="M84.5,137.5 L86,139" id="Line" stroke="#979797" stroke-linecap="square"></path>
+            <text id="Hadoop-Distributed-D" font-family="Helvetica-Bold, Helvetica" font-size="20" font-weight="bold" fill="#000000">
+                <tspan x="205.433594" y="230">Hadoop Distributed Data Store</tspan>
+            </text>
+        </g>
+    </g>
+</svg>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/static/SCMBlockDiagram.png
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/static/SCMBlockDiagram.png b/hadoop-ozone/docs/static/SCMBlockDiagram.png
new file mode 100644
index 0000000..04d27ad
Binary files /dev/null and b/hadoop-ozone/docs/static/SCMBlockDiagram.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/_default/single.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/_default/single.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/_default/single.html
new file mode 100644
index 0000000..0fdd1ba
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/_default/single.html
@@ -0,0 +1,32 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+{{ partial "header.html" . }}
+
+  <body>
+
+{{ partial "navbar.html" . }}
+
+    <div class="container-fluid">
+      <div class="row">
+        {{ partial "sidebar.html" . }}
+        <div class="col-sm-9 col-sm-offset-3 col-md-10 col-md-offset-2 main">
+              {{ .Content }}
+        </div>
+      </div>
+    </div>
+
+{{ partial "footer.html" . }}
+
+  </body>
+</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/index.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/index.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/index.html
new file mode 100644
index 0000000..c2c8cd0
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/index.html
@@ -0,0 +1,21 @@
+{{ partial "header.html" . }}
+
+  <body>
+
+{{ partial "navbar.html" . }}
+
+    <div class="container-fluid">
+      <div class="row">
+        {{ partial "sidebar.html" . }}
+        <div class="col-sm-9 col-sm-offset-3 col-md-10 col-md-offset-2 main">
+                    {{ .Content }}
+
+
+        </div>
+      </div>
+    </div>
+
+{{ partial "footer.html" . }}
+
+  </body>
+</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/footer.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/footer.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/footer.html
new file mode 100644
index 0000000..4aa5b59
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/footer.html
@@ -0,0 +1,19 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<!-- Bootstrap core JavaScript
+================================================== -->
+<!-- Placed at the end of the document so the pages load faster -->
+<script src="{{ "js/jquery.min.js" | relURL}}"></script>
+<script src="{{ "js/ozonedoc.js" | relURL}}"></script>
+<script src="{{ "js/bootstrap.min.js" | relURL}}"></script>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/header.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/header.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/header.html
new file mode 100644
index 0000000..c1f47a9
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/header.html
@@ -0,0 +1,31 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
+    <meta name="description" content="Hadoop Ozone Documentation">
+
+    <title>Documentation for Apache Hadoop Ozone</title>
+
+    <!-- Bootstrap core CSS -->
+    <link href="{{ "css/bootstrap.min.css" | relURL }}" rel="stylesheet">
+
+    <!-- Custom styles for this template -->
+    <link href="{{ "css/ozonedoc.css" | relURL }}" rel="stylesheet">
+
+  </head>




[36/50] [abbrv] hadoop git commit: HDDS-82. Merge ContainerData and ContainerStatus classes. Contributed by Bharat Viswanadham.

Posted by ar...@apache.org.
HDDS-82. Merge ContainerData and ContainerStatus classes. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e881267
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e881267
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e881267

Branch: refs/heads/HDDS-48
Commit: 5e88126776e6d682a48f737d8ab1ad0e04d3e767
Parents: 0b4c44b
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 21 16:09:24 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 21 16:09:24 2018 -0700

----------------------------------------------------------------------
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../container/common/helpers/ContainerData.java | 163 +++++++++++++-
 .../common/impl/ContainerManagerImpl.java       | 144 ++++++------
 .../container/common/impl/ContainerStatus.java  | 217 -------------------
 .../RandomContainerDeletionChoosingPolicy.java  |  10 +-
 ...NOrderedContainerDeletionChoosingPolicy.java |  20 +-
 .../ContainerDeletionChoosingPolicy.java        |   3 +-
 .../common/impl/TestContainerPersistence.java   |  19 +-
 8 files changed, 257 insertions(+), 320 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index e7e5b2b..95b7cbb 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -152,6 +152,7 @@ enum ContainerLifeCycleState {
     OPEN = 1;
     CLOSING = 2;
     CLOSED = 3;
+    INVALID = 4;
 }
 
 message ContainerCommandRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 2a079b0..14ee33a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -52,6 +52,17 @@ public class ContainerData {
   private ContainerType containerType;
   private String containerDBType;
 
+
+  /**
+   * Number of pending deletion blocks in container.
+   */
+  private int numPendingDeletionBlocks;
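+
+  /**
+   * Per-container I/O statistics, kept in AtomicLongs so they can be
+   * updated concurrently.
+   */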
+  private AtomicLong readBytes;
+  private AtomicLong writeBytes;
+  private AtomicLong readCount;
+  private AtomicLong writeCount;
+
+
   /**
    * Constructs a  ContainerData Object.
    *
@@ -66,6 +77,34 @@ public class ContainerData {
     this.bytesUsed =  new AtomicLong(0L);
     this.containerID = containerID;
     this.state = ContainerLifeCycleState.OPEN;
+    this.numPendingDeletionBlocks = 0;
+    this.readCount = new AtomicLong(0L);
+    this.readBytes =  new AtomicLong(0L);
+    this.writeCount =  new AtomicLong(0L);
+    this.writeBytes =  new AtomicLong(0L);
+  }
+
+  /**
+   * Constructs a ContainerData Object.
+   *
+   * @param containerID - ID
+   * @param conf - Configuration
+   * @param state - ContainerLifeCycleState
+   */
+  public ContainerData(long containerID, Configuration conf,
+                       ContainerLifeCycleState state) {
+    this.metadata = new TreeMap<>();
+    this.maxSize = conf.getLong(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
+        ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT) * OzoneConsts.GB;
+    this.bytesUsed =  new AtomicLong(0L);
+    this.containerID = containerID;
+    this.state = state;
+    this.numPendingDeletionBlocks = 0;
+    this.readCount = new AtomicLong(0L);
+    this.readBytes =  new AtomicLong(0L);
+    this.writeCount =  new AtomicLong(0L);
+    this.writeBytes =  new AtomicLong(0L);
   }
 
   /**
@@ -293,6 +332,14 @@ public class ContainerData {
   }
 
   /**
+   * Checks if the container is valid.
+   * @return - boolean
+   */
+  public boolean isValid() {
+    return !(ContainerLifeCycleState.INVALID == state);
+  }
+
+  /**
    * Marks this container as closed.
    */
   public synchronized void closeContainer() {
@@ -317,11 +364,119 @@ public class ContainerData {
     this.bytesUsed.set(used);
   }
 
-  public long addBytesUsed(long delta) {
-    return this.bytesUsed.addAndGet(delta);
-  }
-
+  /**
+   * Get the number of bytes used by the container.
+   * @return the number of bytes used by the container.
+   */
   public long getBytesUsed() {
     return bytesUsed.get();
   }
+
+  /**
+   * Increase the number of bytes used by the container.
+   * @param used number of bytes to add to the container's usage.
+   * @return the current number of bytes used by the container after increase.
+   */
+  public long incrBytesUsed(long used) {
+    return this.bytesUsed.addAndGet(used);
+  }
+
+
+  /**
+   * Decrease the number of bytes used by the container.
+   * @param reclaimed the number of bytes reclaimed from the container.
+   * @return the current number of bytes used by the container after decrease.
+   */
+  public long decrBytesUsed(long reclaimed) {
+    return this.bytesUsed.addAndGet(-1L * reclaimed);
+  }
+
+  /**
+   * Increase the count of pending deletion blocks.
+   *
+   * @param numBlocks increment number
+   */
+  public void incrPendingDeletionBlocks(int numBlocks) {
+    this.numPendingDeletionBlocks += numBlocks;
+  }
+
+  /**
+   * Decrease the count of pending deletion blocks.
+   *
+   * @param numBlocks decrement number
+   */
+  public void decrPendingDeletionBlocks(int numBlocks) {
+    this.numPendingDeletionBlocks -= numBlocks;
+  }
+
+  /**
+   * Get the number of pending deletion blocks.
+   */
+  public int getNumPendingDeletionBlocks() {
+    return this.numPendingDeletionBlocks;
+  }
+
+  /**
+   * Get the number of bytes read from the container.
+   * @return the number of bytes read from the container.
+   */
+  public long getReadBytes() {
+    return readBytes.get();
+  }
+
+  /**
+   * Increase the number of bytes read from the container.
+   * @param bytes number of bytes read.
+   */
+  public void incrReadBytes(long bytes) {
+    this.readBytes.addAndGet(bytes);
+  }
+
+  /**
+   * Get the number of times the container is read.
+   * @return the number of times the container is read.
+   */
+  public long getReadCount() {
+    return readCount.get();
+  }
+
+  /**
+   * Increase the number of container read count by 1.
+   */
+  public void incrReadCount() {
+    this.readCount.incrementAndGet();
+  }
+
+  /**
+   * Get the number of bytes written into the container.
+   * @return the number of bytes written into the container.
+   */
+  public long getWriteBytes() {
+    return writeBytes.get();
+  }
+
+  /**
+   * Increase the number of bytes written into the container.
+   * @param bytes the number of bytes written into the container.
+   */
+  public void incrWriteBytes(long bytes) {
+    this.writeBytes.addAndGet(bytes);
+  }
+
+  /**
+   * Get the number of writes into the container.
+   * @return the number of writes into the container.
+   */
+  public long getWriteCount() {
+    return writeCount.get();
+  }
+
+  /**
+   * Increase the number of writes into the container by 1.
+   */
+  public void incrWriteCount() {
+    this.writeCount.incrementAndGet();
+  }
+
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index c443ace..3a78c70 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -22,13 +22,14 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerLifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
@@ -39,8 +40,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -116,7 +115,7 @@ public class ContainerManagerImpl implements ContainerManager {
 
   // TODO: consider primitive collection like eclipse-collections
   // to avoid autoboxing overhead
-  private final ConcurrentSkipListMap<Long, ContainerStatus>
+  private final ConcurrentSkipListMap<Long, ContainerData>
       containerMap = new ConcurrentSkipListMap<>();
 
   // Use a non-fair RW lock for better throughput, we may revisit this decision
@@ -246,19 +245,20 @@ public class ContainerManagerImpl implements ContainerManager {
         // when loading the info we get a null, this often means last time
         // SCM was ending up at some middle phase causing that the metadata
         // was not populated. Such containers are marked as inactive.
-        containerMap.put(containerID, new ContainerStatus(null));
+        ContainerData cData = new ContainerData(containerID, conf,
+            ContainerLifeCycleState.INVALID);
+        containerMap.put(containerID, cData);
         return;
       }
       containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
 
-      ContainerStatus containerStatus = new ContainerStatus(containerData);
       // Initialize pending deletion blocks count in in-memory
       // container status.
       MetadataStore metadata = KeyUtils.getDB(containerData, conf);
       List<Map.Entry<byte[], byte[]>> underDeletionBlocks = metadata
           .getSequentialRangeKVs(null, Integer.MAX_VALUE,
               MetadataKeyFilters.getDeletingKeyFilter());
-      containerStatus.incrPendingDeletionBlocks(underDeletionBlocks.size());
+      containerData.incrPendingDeletionBlocks(underDeletionBlocks.size());
 
       List<Map.Entry<byte[], byte[]>> liveKeys = metadata
           .getRangeKVs(null, Integer.MAX_VALUE,
@@ -277,9 +277,9 @@ public class ContainerManagerImpl implements ContainerManager {
           return 0L;
         }
       }).sum();
-      containerStatus.setBytesUsed(bytesUsed);
+      containerData.setBytesUsed(bytesUsed);
 
-      containerMap.put(containerID, containerStatus);
+      containerMap.put(containerID, containerData);
     } catch (IOException ex) {
       LOG.error("read failed for file: {} ex: {}", containerName,
           ex.getMessage());
@@ -287,7 +287,9 @@ public class ContainerManagerImpl implements ContainerManager {
       // TODO : Add this file to a recovery Queue.
 
       // Remember that this container is busted and we cannot use it.
-      containerMap.put(containerID, new ContainerStatus(null));
+      ContainerData cData = new ContainerData(containerID, conf,
+          ContainerLifeCycleState.INVALID);
+      containerMap.put(containerID, cData);
       throw new StorageContainerException("Unable to read container info",
           UNABLE_TO_READ_METADATA_DB);
     } finally {
@@ -456,18 +458,19 @@ public class ContainerManagerImpl implements ContainerManager {
             UNCLOSED_CONTAINER_IO);
       }
 
-      ContainerStatus status = containerMap.get(containerID);
-      if (status == null) {
+      ContainerData containerData = containerMap.get(containerID);
+      if (containerData == null) {
         LOG.debug("No such container. ID: {}", containerID);
         throw new StorageContainerException("No such container. ID : " +
             containerID, CONTAINER_NOT_FOUND);
       }
-      if (status.getContainer() == null) {
+
+      if(!containerData.isValid()) {
         LOG.debug("Invalid container data. ID: {}", containerID);
         throw new StorageContainerException("Invalid container data. Name : " +
             containerID, CONTAINER_NOT_FOUND);
       }
-      ContainerUtils.removeContainer(status.getContainer(), conf, forceDelete);
+      ContainerUtils.removeContainer(containerData, conf, forceDelete);
       containerMap.remove(containerID);
     } catch (StorageContainerException e) {
       throw e;
@@ -509,7 +512,7 @@ public class ContainerManagerImpl implements ContainerManager {
 
     readLock();
     try {
-      ConcurrentNavigableMap<Long, ContainerStatus> map;
+      ConcurrentNavigableMap<Long, ContainerData> map;
       if (startContainerID == 0) {
         map = containerMap.tailMap(containerMap.firstKey(), true);
       } else {
@@ -517,9 +520,9 @@ public class ContainerManagerImpl implements ContainerManager {
       }
 
       int currentCount = 0;
-      for (ContainerStatus entry : map.values()) {
+      for (ContainerData entry : map.values()) {
         if (currentCount < count) {
-          data.add(entry.getContainer());
+          data.add(entry);
           currentCount++;
         } else {
           return;
@@ -546,7 +549,7 @@ public class ContainerManagerImpl implements ContainerManager {
       throw new StorageContainerException("Unable to find the container. ID: "
           + containerID, CONTAINER_NOT_FOUND);
     }
-    ContainerData cData = containerMap.get(containerID).getContainer();
+    ContainerData cData = containerMap.get(containerID);
     if (cData == null) {
       throw new StorageContainerException("Invalid container data. ID: "
           + containerID, CONTAINER_INTERNAL_ERROR);
@@ -584,8 +587,7 @@ public class ContainerManagerImpl implements ContainerManager {
     // I/O failure, this allows us to take quick action in case of container
     // issues.
 
-    ContainerStatus status = new ContainerStatus(containerData);
-    containerMap.put(containerID, status);
+    containerMap.put(containerID, containerData);
   }
 
   @Override
@@ -614,7 +616,7 @@ public class ContainerManagerImpl implements ContainerManager {
 
     try {
       Path location = locationManager.getContainerPath();
-      ContainerData orgData = containerMap.get(containerID).getContainer();
+      ContainerData orgData = containerMap.get(containerID);
       if (orgData == null) {
         // updating a invalid container
         throw new StorageContainerException("Update a container with invalid" +
@@ -652,8 +654,7 @@ public class ContainerManagerImpl implements ContainerManager {
       }
 
       // Update the in-memory map
-      ContainerStatus newStatus = new ContainerStatus(data);
-      containerMap.replace(containerID, newStatus);
+      containerMap.replace(containerID, data);
     } catch (IOException e) {
       // Restore the container file from backup
       if(containerFileBK != null && containerFileBK.exists() && deleted) {
@@ -699,17 +700,12 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public boolean isOpen(long containerID) throws StorageContainerException {
-    final ContainerStatus status = containerMap.get(containerID);
-    if (status == null) {
-      throw new StorageContainerException(
-          "Container status not found: " + containerID, CONTAINER_NOT_FOUND);
-    }
-    final ContainerData cData = status.getContainer();
-    if (cData == null) {
+    final ContainerData containerData = containerMap.get(containerID);
+    if (containerData == null) {
       throw new StorageContainerException(
           "Container not found: " + containerID, CONTAINER_NOT_FOUND);
     }
-    return cData.isOpen();
+    return containerData.isOpen();
   }
 
   /**
@@ -727,7 +723,7 @@ public class ContainerManagerImpl implements ContainerManager {
 
 
   @VisibleForTesting
-  public ConcurrentSkipListMap<Long, ContainerStatus> getContainerMap() {
+  public ConcurrentSkipListMap<Long, ContainerData> getContainerMap() {
     return containerMap;
   }
 
@@ -847,9 +843,9 @@ public class ContainerManagerImpl implements ContainerManager {
     // And we can never get the exact state since close might happen
    // after we iterate past a point.
     return containerMap.entrySet().stream()
-        .filter(containerStatus ->
-            !containerStatus.getValue().getContainer().isOpen())
-        .map(containerStatus -> containerStatus.getValue().getContainer())
+        .filter(containerData ->
+            !containerData.getValue().isOpen())
+        .map(containerData -> containerData.getValue())
         .collect(Collectors.toList());
   }
 
@@ -865,7 +861,7 @@ public class ContainerManagerImpl implements ContainerManager {
     // No need for locking since containerMap is a ConcurrentSkipListMap
     // And we can never get the exact state since close might happen
    // after we iterate past a point.
-    List<ContainerStatus> containers = containerMap.values().stream()
+    List<ContainerData> containers = containerMap.values().stream()
         .collect(Collectors.toList());
 
     ContainerReportsRequestProto.Builder crBuilder =
@@ -875,18 +871,17 @@ public class ContainerManagerImpl implements ContainerManager {
     crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
         .setType(ContainerReportsRequestProto.reportType.fullReport);
 
-    for (ContainerStatus container: containers) {
+    for (ContainerData container: containers) {
       StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
           StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
-      ciBuilder.setContainerID(container.getContainer().getContainerID())
-          .setSize(container.getContainer().getMaxSize())
-          .setUsed(container.getContainer().getBytesUsed())
-          .setKeyCount(container.getContainer().getKeyCount())
+      ciBuilder.setContainerID(container.getContainerID())
+          .setSize(container.getMaxSize())
+          .setUsed(container.getBytesUsed())
+          .setKeyCount(container.getKeyCount())
           .setReadCount(container.getReadCount())
           .setWriteCount(container.getWriteCount())
           .setReadBytes(container.getReadBytes())
-          .setWriteBytes(container.getWriteBytes())
-          .setContainerID(container.getContainer().getContainerID());
+          .setWriteBytes(container.getWriteBytes());
 
       crBuilder.addReports(ciBuilder.build());
     }
@@ -943,8 +938,8 @@ public class ContainerManagerImpl implements ContainerManager {
   public void incrPendingDeletionBlocks(int numBlocks, long containerId) {
     writeLock();
     try {
-      ContainerStatus status = containerMap.get(containerId);
-      status.incrPendingDeletionBlocks(numBlocks);
+      ContainerData cData = containerMap.get(containerId);
+      cData.incrPendingDeletionBlocks(numBlocks);
     } finally {
       writeUnlock();
     }
@@ -954,8 +949,8 @@ public class ContainerManagerImpl implements ContainerManager {
   public void decrPendingDeletionBlocks(int numBlocks, long containerId) {
     writeLock();
     try {
-      ContainerStatus status = containerMap.get(containerId);
-      status.decrPendingDeletionBlocks(numBlocks);
+      ContainerData cData = containerMap.get(containerId);
+      cData.decrPendingDeletionBlocks(numBlocks);
     } finally {
       writeUnlock();
     }
@@ -968,32 +963,37 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public void incrReadCount(long containerId) {
-    ContainerStatus status = containerMap.get(containerId);
-    status.incrReadCount();
+    ContainerData cData = containerMap.get(containerId);
+    cData.incrReadCount();
   }
 
   public long getReadCount(long containerId) {
-    ContainerStatus status = containerMap.get(containerId);
-    return status.getReadCount();
+    ContainerData cData = containerMap.get(containerId);
+    return cData.getReadCount();
   }
 
   /**
-   * Increse the read counter for bytes read from the container.
+   * Increase the read counter for bytes read from the container.
    *
    * @param containerId - ID of the container.
    * @param readBytes     - bytes read from the container.
    */
   @Override
   public void incrReadBytes(long containerId, long readBytes) {
-    ContainerStatus status = containerMap.get(containerId);
-    status.incrReadBytes(readBytes);
+    ContainerData cData = containerMap.get(containerId);
+    cData.incrReadBytes(readBytes);
   }
 
+  /**
+   * Returns the number of bytes read from the container.
+   * @param containerId - ID of the container.
+   * @return the number of bytes read from the container.
+   */
   public long getReadBytes(long containerId) {
     readLock();
     try {
-      ContainerStatus status = containerMap.get(containerId);
-      return status.getReadBytes();
+      ContainerData cData = containerMap.get(containerId);
+      return cData.getReadBytes();
     } finally {
       readUnlock();
     }
@@ -1006,13 +1006,13 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public void incrWriteCount(long containerId) {
-    ContainerStatus status = containerMap.get(containerId);
-    status.incrWriteCount();
+    ContainerData cData = containerMap.get(containerId);
+    cData.incrWriteCount();
   }
 
   public long getWriteCount(long containerId) {
-    ContainerStatus status = containerMap.get(containerId);
-    return status.getWriteCount();
+    ContainerData cData = containerMap.get(containerId);
+    return cData.getWriteCount();
   }
 
   /**
@@ -1023,13 +1023,13 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public void incrWriteBytes(long containerId, long writeBytes) {
-    ContainerStatus status = containerMap.get(containerId);
-    status.incrWriteBytes(writeBytes);
+    ContainerData cData = containerMap.get(containerId);
+    cData.incrWriteBytes(writeBytes);
   }
 
   public long getWriteBytes(long containerId) {
-    ContainerStatus status = containerMap.get(containerId);
-    return status.getWriteBytes();
+    ContainerData cData = containerMap.get(containerId);
+    return cData.getWriteBytes();
   }
 
   /**
@@ -1041,8 +1041,8 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public long incrBytesUsed(long containerId, long used) {
-    ContainerStatus status = containerMap.get(containerId);
-    return status.incrBytesUsed(used);
+    ContainerData cData = containerMap.get(containerId);
+    return cData.incrBytesUsed(used);
   }
 
   /**
@@ -1054,13 +1054,13 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public long decrBytesUsed(long containerId, long used) {
-    ContainerStatus status = containerMap.get(containerId);
-    return status.decrBytesUsed(used);
+    ContainerData cData = containerMap.get(containerId);
+    return cData.decrBytesUsed(used);
   }
 
   public long getBytesUsed(long containerId) {
-    ContainerStatus status = containerMap.get(containerId);
-    return status.getBytesUsed();
+    ContainerData cData = containerMap.get(containerId);
+    return cData.getBytesUsed();
   }
 
   /**
@@ -1071,8 +1071,8 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public long getNumKeys(long containerId) {
-    ContainerStatus status = containerMap.get(containerId);
-    return status.getNumKeys();  }
+    ContainerData cData = containerMap.get(containerId);
+    return cData.getKeyCount();  }
 
   /**
    * Get the container report state to send via HB to SCM.

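For orientation, the net effect of the hunks above is that the counters and
pending-deletion bookkeeping formerly held by the ContainerStatus wrapper now
live directly on ContainerData, so call sites drop the getContainer()
indirection. A minimal sketch of the resulting shape (field and method names
chosen for illustration; the real ContainerData carries more state):

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch only: per-container metrics folded into the data object.
    // AtomicLong lets readers/writers increment without an external lock;
    // the pending-deletion counter remains guarded by the manager's
    // write lock, as in incrPendingDeletionBlocks() above.
    class ContainerDataSketch {
      private final AtomicLong readBytes = new AtomicLong();
      private final AtomicLong readCount = new AtomicLong();
      private final AtomicLong writeBytes = new AtomicLong();
      private final AtomicLong writeCount = new AtomicLong();
      private int numPendingDeletionBlocks;

      void incrReadBytes(long bytes) { readBytes.addAndGet(bytes); }
      long getReadBytes() { return readBytes.get(); }
      void incrReadCount() { readCount.incrementAndGet(); }
      long getReadCount() { return readCount.get(); }
      void incrWriteBytes(long bytes) { writeBytes.addAndGet(bytes); }
      long getWriteBytes() { return writeBytes.get(); }
      void incrPendingDeletionBlocks(int n) { numPendingDeletionBlocks += n; }
      int getNumPendingDeletionBlocks() { return numPendingDeletionBlocks; }
    }
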
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java
deleted file mode 100644
index 5577323..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * This class represents the state of a container. if the
- * container reading encountered an error when we boot up we will post that
- * info to a recovery queue and keep the info in the containerMap.
- * <p/>
- * if and when the issue is fixed, the expectation is that this entry will be
- * deleted by the recovery thread from the containerMap and will insert entry
- * instead of modifying this class.
- */
-public class ContainerStatus {
-  private final ContainerData containerData;
-
-  /**
-   * Number of pending deletion blocks in container.
-   */
-  private int numPendingDeletionBlocks;
-
-  private AtomicLong readBytes;
-
-  private AtomicLong writeBytes;
-
-  private AtomicLong readCount;
-
-  private AtomicLong writeCount;
-
-  /**
-   * Creates a Container Status class.
-   *
-   * @param containerData - ContainerData.
-   */
-  ContainerStatus(ContainerData containerData) {
-    this.numPendingDeletionBlocks = 0;
-    this.containerData = containerData;
-    this.readCount = new AtomicLong(0L);
-    this.readBytes =  new AtomicLong(0L);
-    this.writeCount =  new AtomicLong(0L);
-    this.writeBytes =  new AtomicLong(0L);
-  }
-
-  /**
-   * Returns container if it is active. It is not active if we have had an
-   * error and we are waiting for the background threads to fix the issue.
-   *
-   * @return ContainerData.
-   */
-  public ContainerData getContainer() {
-    return containerData;
-  }
-
-  /**
-   * Increase the count of pending deletion blocks.
-   *
-   * @param numBlocks increment number
-   */
-  public void incrPendingDeletionBlocks(int numBlocks) {
-    this.numPendingDeletionBlocks += numBlocks;
-  }
-
-  /**
-   * Decrease the count of pending deletion blocks.
-   *
-   * @param numBlocks decrement number
-   */
-  public void decrPendingDeletionBlocks(int numBlocks) {
-    this.numPendingDeletionBlocks -= numBlocks;
-  }
-
-  /**
-   * Get the number of pending deletion blocks.
-   */
-  public int getNumPendingDeletionBlocks() {
-    return this.numPendingDeletionBlocks;
-  }
-
-  /**
-   * Get the number of bytes read from the container.
-   * @return the number of bytes read from the container.
-   */
-  public long getReadBytes() {
-    return readBytes.get();
-  }
-
-  /**
-   * Increase the number of bytes read from the container.
-   * @param bytes number of bytes read.
-   */
-  public void incrReadBytes(long bytes) {
-    this.readBytes.addAndGet(bytes);
-  }
-
-  /**
-   * Get the number of times the container is read.
-   * @return the number of times the container is read.
-   */
-  public long getReadCount() {
-    return readCount.get();
-  }
-
-  /**
-   * Increase the number of container read count by 1.
-   */
-  public void incrReadCount() {
-    this.readCount.incrementAndGet();
-  }
-
-  /**
-   * Get the number of bytes write into the container.
-   * @return the number of bytes write into the container.
-   */
-  public long getWriteBytes() {
-    return writeBytes.get();
-  }
-
-  /**
-   * Increase the number of bytes write into the container.
-   * @param bytes the number of bytes write into the container.
-   */
-  public void incrWriteBytes(long bytes) {
-    this.writeBytes.addAndGet(bytes);
-  }
-
-  /**
-   * Get the number of writes into the container.
-   * @return the number of writes into the container.
-   */
-  public long getWriteCount() {
-    return writeCount.get();
-  }
-
-  /**
-   * Increase the number of writes into the container by 1.
-   */
-  public void incrWriteCount() {
-    this.writeCount.incrementAndGet();
-  }
-
-  /**
-   * Get the number of bytes used by the container.
-   * @return the number of bytes used by the container.
-   */
-  public long getBytesUsed() {
-    return containerData.getBytesUsed();
-  }
-
-  /**
-   * Increase the number of bytes used by the container.
-   * @param used number of bytes used by the container.
-   * @return the current number of bytes used by the container afert increase.
-   */
-  public long incrBytesUsed(long used) {
-    return containerData.addBytesUsed(used);
-  }
-
-  /**
-   * Set the number of bytes used by the container.
-   * @param used the number of bytes used by the container.
-   */
-  public void setBytesUsed(long used) {
-    containerData.setBytesUsed(used);
-  }
-
-  /**
-   * Decrease the number of bytes used by the container.
-   * @param reclaimed the number of bytes reclaimed from the container.
-   * @return the current number of bytes used by the container after decrease.
-   */
-  public long decrBytesUsed(long reclaimed) {
-    return this.containerData.addBytesUsed(-1L * reclaimed);
-  }
-
-  /**
-   * Get the maximum container size.
-   * @return the maximum container size.
-   */
-  public long getMaxSize() {
-    return containerData.getMaxSize();
-  }
-
-  /**
-   * Set the maximum container size.
-   * @param size the maximum container size.
-   */
-  public void setMaxSize(long size) {
-    this.containerData.setMaxSize(size);
-  }
-
-  /**
-   * Get the number of keys in the container.
-   * @return the number of keys in the container.
-   */
-  public long getNumKeys() {
-    return containerData.getKeyCount();
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
index 06177cb..97fdb9e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
@@ -41,24 +41,24 @@ public class RandomContainerDeletionChoosingPolicy
 
   @Override
   public List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerStatus> candidateContainers)
+      Map<Long, ContainerData> candidateContainers)
       throws StorageContainerException {
     Preconditions.checkNotNull(candidateContainers,
         "Internal assertion: candidate containers cannot be null");
 
     int currentCount = 0;
     List<ContainerData> result = new LinkedList<>();
-    ContainerStatus[] values = new ContainerStatus[candidateContainers.size()];
+    ContainerData[] values = new ContainerData[candidateContainers.size()];
     // to get a shuffle list
-    for (ContainerStatus entry : DFSUtil.shuffle(
+    for (ContainerData entry : DFSUtil.shuffle(
         candidateContainers.values().toArray(values))) {
       if (currentCount < count) {
-        result.add(entry.getContainer());
+        result.add(entry);
         currentCount++;
 
         LOG.debug("Select container {} for block deletion, "
             + "pending deletion blocks num: {}.",
-            entry.getContainer().getContainerID(),
+            entry.getContainerID(),
             entry.getNumPendingDeletionBlocks());
       } else {
         break;

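The random policy above reduces to "shuffle the candidates, take the first
count". A self-contained sketch of the same idea using java.util.Collections
in place of DFSUtil.shuffle (which plays the identical role in the patch):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    final class RandomChoiceSketch {
      // Pick up to 'count' values in uniform random order.
      static <T> List<T> choose(int count, Map<Long, T> candidates) {
        List<T> shuffled = new ArrayList<>(candidates.values());
        Collections.shuffle(shuffled);
        return new ArrayList<>(
            shuffled.subList(0, Math.min(count, shuffled.size())));
      }
    }
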
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
index 2463426..9a109e8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
@@ -41,11 +41,11 @@ public class TopNOrderedContainerDeletionChoosingPolicy
   private static final Logger LOG =
       LoggerFactory.getLogger(TopNOrderedContainerDeletionChoosingPolicy.class);
 
-  /** customized comparator used to compare differentiate container status. **/
-  private static final Comparator<ContainerStatus> CONTAINER_STATUS_COMPARATOR
-      = new Comparator<ContainerStatus>() {
+  /** Customized comparator ordering container data by pending deletion blocks. **/
+  private static final Comparator<ContainerData> CONTAINER_DATA_COMPARATOR
+      = new Comparator<ContainerData>() {
         @Override
-        public int compare(ContainerStatus c1, ContainerStatus c2) {
+        public int compare(ContainerData c1, ContainerData c2) {
           return Integer.compare(c2.getNumPendingDeletionBlocks(),
               c1.getNumPendingDeletionBlocks());
         }
@@ -53,28 +53,28 @@ public class TopNOrderedContainerDeletionChoosingPolicy
 
   @Override
   public List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerStatus> candidateContainers)
+      Map<Long, ContainerData> candidateContainers)
       throws StorageContainerException {
     Preconditions.checkNotNull(candidateContainers,
         "Internal assertion: candidate containers cannot be null");
 
     List<ContainerData> result = new LinkedList<>();
-    List<ContainerStatus> orderedList = new LinkedList<>();
+    List<ContainerData> orderedList = new LinkedList<>();
     orderedList.addAll(candidateContainers.values());
-    Collections.sort(orderedList, CONTAINER_STATUS_COMPARATOR);
+    Collections.sort(orderedList, CONTAINER_DATA_COMPARATOR);
 
     // get top N list ordered by pending deletion blocks' number
     int currentCount = 0;
-    for (ContainerStatus entry : orderedList) {
+    for (ContainerData entry : orderedList) {
       if (currentCount < count) {
         if (entry.getNumPendingDeletionBlocks() > 0) {
-          result.add(entry.getContainer());
+          result.add(entry);
           currentCount++;
 
           LOG.debug(
               "Select container {} for block deletion, "
                   + "pending deletion blocks num: {}.",
-              entry.getContainer().getContainerID(),
+              entry.getContainerID(),
               entry.getNumPendingDeletionBlocks());
         } else {
           LOG.debug("Stop looking for next container, there is no"

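The anonymous comparator above orders containers by pending-deletion count,
highest first, so the head of the sorted list is the best deletion candidate.
With java.util.Comparator helpers the same ordering can be written as a
one-liner; a sketch, reusing the ContainerData accessor from the patch:

    import java.util.Comparator;

    // Highest getNumPendingDeletionBlocks() first; equivalent ordering to
    // the anonymous Comparator in TopNOrderedContainerDeletionChoosingPolicy.
    Comparator<ContainerData> byPendingDeletionsDesc =
        Comparator.comparingInt(ContainerData::getNumPendingDeletionBlocks)
            .reversed();
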
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
index 6b60c52..1ed50fb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.interfaces;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerStatus;
 
 import java.util.List;
 import java.util.Map;
@@ -41,6 +40,6 @@ public interface ContainerDeletionChoosingPolicy {
    * @throws StorageContainerException
    */
   List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerStatus> candidateContainers)
+      Map<Long, ContainerData> candidateContainers)
       throws StorageContainerException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e881267/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index a7cab4e..89ee673 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -65,7 +65,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.ArrayList;
-import java.util.UUID;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
@@ -191,23 +190,23 @@ public class TestContainerPersistence {
     containerManager.createContainer(data);
     Assert.assertTrue(containerManager.getContainerMap()
         .containsKey(testContainerID));
-    ContainerStatus status = containerManager
+    ContainerData cData = containerManager
         .getContainerMap().get(testContainerID);
 
-    Assert.assertNotNull(status.getContainer());
-    Assert.assertNotNull(status.getContainer().getContainerPath());
-    Assert.assertNotNull(status.getContainer().getDBPath());
+    Assert.assertNotNull(cData);
+    Assert.assertNotNull(cData.getContainerPath());
+    Assert.assertNotNull(cData.getDBPath());
 
 
-    Assert.assertTrue(new File(status.getContainer().getContainerPath())
+    Assert.assertTrue(new File(cData.getContainerPath())
         .exists());
 
-    Path meta = Paths.get(status.getContainer().getDBPath()).getParent();
+    Path meta = Paths.get(cData.getDBPath()).getParent();
     Assert.assertTrue(meta != null && Files.exists(meta));
 
     MetadataStore store = null;
     try {
-      store = KeyUtils.getDB(status.getContainer(), conf);
+      store = KeyUtils.getDB(cData, conf);
       Assert.assertNotNull(store);
     } finally {
       if (store != null) {
@@ -762,7 +761,7 @@ public class TestContainerPersistence {
 
     // Verify in-memory map
     ContainerData actualNewData = containerManager.getContainerMap()
-        .get(testContainerID).getContainer();
+        .get(testContainerID);
     Assert.assertEquals("shire_new",
         actualNewData.getAllMetadata().get("VOLUME"));
     Assert.assertEquals("bilbo_new",
@@ -805,7 +804,7 @@ public class TestContainerPersistence {
 
     // Verify in-memory map
     actualNewData = containerManager.getContainerMap()
-        .get(testContainerID).getContainer();
+        .get(testContainerID);
     Assert.assertEquals("shire_new_1",
         actualNewData.getAllMetadata().get("VOLUME"));
     Assert.assertEquals("bilbo_new_1",




[23/50] [abbrv] hadoop git commit: YARN-7530. Refactored YARN service API project location. Contributed by Chandni Singh

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/example-app.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/example-app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/example-app.json
new file mode 100644
index 0000000..a2f41cf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/example-app.json
@@ -0,0 +1,16 @@
+{
+  "name": "example-app",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/log4j.properties
new file mode 100644
index 0000000..81a3f6a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/log4j.properties
@@ -0,0 +1,19 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
new file mode 100644
index 0000000..1d514d6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
@@ -0,0 +1,16 @@
+{
+  "name": "bad",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
new file mode 100644
index 0000000..823561d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
@@ -0,0 +1,16 @@
+{
+  "name": "example-app1",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
new file mode 100644
index 0000000..823561d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
@@ -0,0 +1,16 @@
+{
+  "name": "example-app1",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
new file mode 100644
index 0000000..8a3a561
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
@@ -0,0 +1,16 @@
+{
+  "name": "example-app3",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
new file mode 100644
index 0000000..823561d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
@@ -0,0 +1,16 @@
+{
+  "name": "example-app1",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
new file mode 100644
index 0000000..d8fd1d1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
@@ -0,0 +1,16 @@
+{
+  "name": "example-app2",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
index 5f5e70b..51e19b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
@@ -34,5 +34,6 @@
 
     <modules>
         <module>hadoop-yarn-services-core</module>
+        <module>hadoop-yarn-services-api</module>
     </modules>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index b2b34ec..490e9ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -37,7 +37,6 @@
     <module>hadoop-yarn-applications-distributedshell</module>
     <module>hadoop-yarn-applications-unmanaged-am-launcher</module>
     <module>hadoop-yarn-services</module>
-    <module>hadoop-yarn-services-api</module>
   </modules>
 
  <profiles>




[02/50] [abbrv] hadoop git commit: YARN-8128. Documenting the per-node per-app file limit in YARN log aggregation. Contributed by Xuan Gong.

Posted by ar...@apache.org.
YARN-8128. Documenting the per-node per-app file limit in YARN log aggregation. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/328f0847
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/328f0847
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/328f0847

Branch: refs/heads/HDDS-48
Commit: 328f0847e3d98cad8c368d57499f31081c153237
Parents: a2cdffb
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Authored: Thu May 17 12:47:10 2018 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Committed: Thu May 17 12:47:10 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/yarn/conf/YarnConfiguration.java | 10 ++++++++++
 .../LogAggregationFileController.java              | 17 ++++++-----------
 .../src/main/resources/yarn-default.xml            |  9 +++++++++
 3 files changed, 25 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/328f0847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5ba2e05..8e56cb8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1377,6 +1377,16 @@ public class YarnConfiguration extends Configuration {
       NM_PREFIX + "log-aggregation.roll-monitoring-interval-seconds";
   public static final long
       DEFAULT_NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS = -1;
+
+  /**
+   * Defines how many aggregated log files per application per NM we can
+   * keep in the remote file system.
+   */
+  public static final String NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP
+      = NM_PREFIX + "log-aggregation.num-log-files-per-app";
+  public static final int
+      DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP = 30;
+
   /**
    * Number of threads used in log cleanup. Only applicable if Log aggregation
    * is disabled

http://git-wip-us.apache.org/repos/asf/hadoop/blob/328f0847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index aeef574..5ac89e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -95,13 +95,6 @@ public abstract class LogAggregationFileController {
   protected static final FsPermission APP_LOG_FILE_UMASK = FsPermission
       .createImmutable((short) (0640 ^ 0777));
 
-  // This is temporary solution. The configuration will be deleted once
-  // we find a more scalable method to only write a single log file per LRS.
-  private static final String NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP
-      = YarnConfiguration.NM_PREFIX + "log-aggregation.num-log-files-per-app";
-  private static final int
-      DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP = 30;
-
   // This is temporary solution. The configuration will be deleted once we have
   // the FileSystem API to check whether append operation is supported or not.
   public static final String LOG_AGGREGATION_FS_SUPPORT_APPEND
@@ -122,12 +115,14 @@ public abstract class LogAggregationFileController {
    */
   public void initialize(Configuration conf, String controllerName) {
     this.conf = conf;
-    int configuredRentionSize =
-        conf.getInt(NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP,
-            DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP);
+    int configuredRentionSize = conf.getInt(
+        YarnConfiguration.NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP,
+        YarnConfiguration
+            .DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP);
     if (configuredRentionSize <= 0) {
       this.retentionSize =
-        DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP;
+          YarnConfiguration
+              .DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP;
     } else {
       this.retentionSize = configuredRentionSize;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/328f0847/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e078206..156ca24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3036,6 +3036,15 @@
   </property>
 
   <property>
+    <description>Defines how many aggregated log files per application per
+    NM we can keep in the remote file system. By default, the total number
+    of aggregated log files per application per NM is 30.
+    </description>
+    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
+    <value>30</value>
+  </property>
+
+  <property>
     <description>
     Enable/disable intermediate-data encryption at YARN level. For now,
     this only is used by the FileSystemRMStateStore to setup right


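Putting the new knob together end to end, a hedged usage sketch (the constant
names match the patch; the wrapper class and printout are illustrative only):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public final class LogRetentionSketch {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Same fallback the controller applies: non-positive values fall
        // back to the default of 30 files per app per NM.
        int configured = conf.getInt(
            YarnConfiguration.NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP,
            YarnConfiguration
                .DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP);
        int retentionSize = configured > 0 ? configured
            : YarnConfiguration
                .DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP;
        System.out.println("Aggregated log files kept per app per NM: "
            + retentionSize);
      }
    }
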


[38/50] [abbrv] hadoop git commit: YARN-8206. Sending a kill does not immediately kill docker containers. Contributed by Eric Badger

Posted by ar...@apache.org.
YARN-8206. Sending a kill does not immediately kill docker containers. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f11288e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f11288e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f11288e

Branch: refs/heads/HDDS-48
Commit: 5f11288e41fca2e414dcbea130c7702e29d4d610
Parents: 57c2feb
Author: Jason Lowe <jl...@apache.org>
Authored: Tue May 22 09:27:08 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue May 22 09:27:08 2018 -0500

----------------------------------------------------------------------
 .../runtime/DockerLinuxContainerRuntime.java    |  93 ++++--
 .../runtime/TestDockerContainerRuntime.java     | 301 +++++++++----------
 2 files changed, 198 insertions(+), 196 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f11288e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 40cb031..787e892 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -617,19 +617,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
    */
   private boolean allowPrivilegedContainerExecution(Container container)
       throws ContainerExecutionException {
-    Map<String, String> environment = container.getLaunchContext()
-        .getEnvironment();
-    String runPrivilegedContainerEnvVar = environment
-        .get(ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER);
-
-    if (runPrivilegedContainerEnvVar == null) {
-      return false;
-    }
 
-    if (!runPrivilegedContainerEnvVar.equalsIgnoreCase("true")) {
-      LOG.warn("NOT running a privileged container. Value of " +
-          ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER
-          + "is invalid: " + runPrivilegedContainerEnvVar);
+    if (!isContainerRequestedAsPrivileged(container)) {
       return false;
     }
 
@@ -669,6 +658,20 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     return true;
   }
 
+  /**
+   * This function only returns whether a privileged container was requested,
+   * not whether the container was or will be launched as privileged.
+   * @param container - the container to check.
+   * @return true if a privileged container was requested, false otherwise.
+   */
+  private boolean isContainerRequestedAsPrivileged(
+      Container container) {
+    String runPrivilegedContainerEnvVar = container.getLaunchContext()
+        .getEnvironment().get(ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER);
+    return Boolean.parseBoolean(runPrivilegedContainerEnvVar);
+  }
+
+  @VisibleForTesting
   private String mountReadOnlyPath(String mount,
       Map<Path, List<String>> localizedResources)
       throws ContainerExecutionException {
@@ -963,19 +966,16 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   public void signalContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
     ContainerExecutor.Signal signal = ctx.getExecutionAttribute(SIGNAL);
-    String containerId = ctx.getContainer().getContainerId().toString();
     Map<String, String> env =
         ctx.getContainer().getLaunchContext().getEnvironment();
     try {
       if (ContainerExecutor.Signal.NULL.equals(signal)) {
         executeLivelinessCheck(ctx);
+      } else if (ContainerExecutor.Signal.TERM.equals(signal)) {
+        String containerId = ctx.getContainer().getContainerId().toString();
+        handleContainerStop(containerId, env);
       } else {
-        if (ContainerExecutor.Signal.KILL.equals(signal)
-            || ContainerExecutor.Signal.TERM.equals(signal)) {
-          handleContainerStop(containerId, env);
-        } else {
-          handleContainerKill(containerId, env, signal);
-        }
+        handleContainerKill(ctx, env, signal);
       }
     } catch (ContainerExecutionException e) {
       LOG.warn("Signal docker container failed. Exception: ", e);
@@ -1184,21 +1184,50 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     }
   }
 
-  private void handleContainerKill(String containerId, Map<String, String> env,
+  private void handleContainerKill(ContainerRuntimeContext ctx,
+      Map<String, String> env,
       ContainerExecutor.Signal signal) throws ContainerExecutionException {
-    DockerCommandExecutor.DockerContainerStatus containerStatus =
-        DockerCommandExecutor.getContainerStatus(containerId, conf,
-            privilegedOperationExecutor, nmContext);
-    if (DockerCommandExecutor.isKillable(containerStatus)) {
-      DockerKillCommand dockerKillCommand =
-          new DockerKillCommand(containerId).setSignal(signal.name());
-      DockerCommandExecutor.executeDockerCommand(dockerKillCommand, containerId,
-          env, conf, privilegedOperationExecutor, false, nmContext);
-    } else {
-      if (LOG.isDebugEnabled()) {
+    Container container = ctx.getContainer();
+
+    // Only need to check whether the container was asked to be privileged.
+    // If the container had failed the permissions checks upon launch, it
+    // would have never been launched and thus we wouldn't be here
+    // attempting to signal it.
+    if (isContainerRequestedAsPrivileged(container)) {
+      String containerId = container.getContainerId().toString();
+      DockerCommandExecutor.DockerContainerStatus containerStatus =
+          DockerCommandExecutor.getContainerStatus(containerId, conf,
+          privilegedOperationExecutor, nmContext);
+      if (DockerCommandExecutor.isKillable(containerStatus)) {
+        DockerKillCommand dockerKillCommand =
+            new DockerKillCommand(containerId).setSignal(signal.name());
+        DockerCommandExecutor.executeDockerCommand(dockerKillCommand,
+            containerId, env, conf, privilegedOperationExecutor, false,
+            nmContext);
+      } else {
         LOG.debug(
-            "Container status is " + containerStatus.getName()
-                + ", skipping kill - " + containerId);
+            "Container status is {}, skipping kill - {}",
+            containerStatus.getName(), containerId);
+      }
+    } else {
+      PrivilegedOperation privOp = new PrivilegedOperation(
+          PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
+      privOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER),
+          ctx.getExecutionAttribute(USER),
+          Integer.toString(PrivilegedOperation.RunAsUserCommand
+          .SIGNAL_CONTAINER.getValue()),
+          ctx.getExecutionAttribute(PID),
+          Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue()));
+      privOp.disableFailureLogging();
+      try {
+        privilegedOperationExecutor.executePrivilegedOperation(null,
+            privOp, null, null, false, false);
+      } catch (PrivilegedOperationException e) {
+        //Don't log the failure here. Some kinds of signaling failures are
+        // acceptable. Let the calling executor decide what to do.
+        throw new ContainerExecutionException("Signal container failed using "
+            + "signal: " + signal.name(), e
+            .getExitCode(), e.getOutput(), e.getErrorOutput());
       }
     }
   }

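For context on the behavior change: "docker stop" first sends SIGTERM and only
escalates to SIGKILL after a grace period, which is why a YARN KILL previously
did not take effect immediately. After this patch only TERM maps to the stop
path; KILL and other signals go through handleContainerKill(), which uses
"docker kill" for privileged containers and a plain SIGNAL_CONTAINER
privileged operation (a kill(2) on the container PID) otherwise. A flattened
sketch of the resulting dispatch, following the method names in the diff:

    // Sketch of signalContainer() after the patch (not the exact code).
    if (ContainerExecutor.Signal.NULL.equals(signal)) {
      executeLivelinessCheck(ctx);           // probe only, no signal sent
    } else if (ContainerExecutor.Signal.TERM.equals(signal)) {
      handleContainerStop(containerId, env); // graceful docker stop
    } else {
      handleContainerKill(ctx, env, signal); // docker kill or kill(2) on PID
    }
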
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f11288e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index af69e22..ef21ef0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -43,13 +43,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileg
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerClient;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommandExecutor;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerKillCommand;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRmCommand;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerStartCommand;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerStopCommand;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerVolumeCommand;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.DockerCommandPlugin;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin;
@@ -87,6 +82,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPID;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPLICATION_LOCAL_DIRS;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR;
@@ -103,7 +99,6 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RESOURCES_OPTIONS;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RUN_AS_USER;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.SIGNAL;
-import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.TC_COMMAND_FILE;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_FILECACHE_DIRS;
 import static org.mockito.Matchers.anyString;
@@ -317,20 +312,24 @@ public class TestDockerContainerRuntime {
         .isDockerContainerRequested(envOtherType));
   }
 
-  @SuppressWarnings("unchecked")
   private PrivilegedOperation capturePrivilegedOperation()
       throws PrivilegedOperationException {
+    return capturePrivilegedOperation(1);
+  }
+
+  @SuppressWarnings("unchecked")
+  private PrivilegedOperation capturePrivilegedOperation(int invocations)
+      throws PrivilegedOperationException {
     ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass(
         PrivilegedOperation.class);
 
-    //single invocation expected
     //due to type erasure + mocking, this verification requires a suppress
     // warning annotation on the entire method
-    verify(mockExecutor, times(1))
+    verify(mockExecutor, times(invocations))
         .executePrivilegedOperation(anyList(), opCaptor.capture(), any(
             File.class), anyMap(), anyBoolean(), anyBoolean());
 
-    //verification completed. we need to isolate specific invications.
+    //verification completed. we need to isolate specific invocations.
     // hence, reset mock here
     Mockito.reset(mockExecutor);
 
@@ -918,6 +917,8 @@ public class TestDockerContainerRuntime {
   @Test
   public void testLaunchPrivilegedContainersWithDisabledSetting()
       throws ContainerExecutionException {
+    conf.setBoolean(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS,
+        false);
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, nmContext);
@@ -939,6 +940,7 @@ public class TestDockerContainerRuntime {
     //Enable privileged containers.
     conf.setBoolean(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS,
         true);
+    conf.set(YarnConfiguration.NM_DOCKER_PRIVILEGED_CONTAINERS_ACL, "");
 
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
@@ -1356,9 +1358,12 @@ public class TestDockerContainerRuntime {
   public void testDockerStopOnTermSignalWhenRunning()
       throws ContainerExecutionException, PrivilegedOperationException,
       IOException {
-    List<String> dockerCommands = getDockerCommandsForSignal(
-        ContainerExecutor.Signal.TERM,
-        DockerCommandExecutor.DockerContainerStatus.RUNNING);
+    when(mockExecutor
+        .executePrivilegedOperation(anyList(), any(PrivilegedOperation.class),
+        any(File.class), anyMap(), anyBoolean(), anyBoolean())).thenReturn(
+        DockerCommandExecutor.DockerContainerStatus.RUNNING.getName());
+    List<String> dockerCommands = getDockerCommandsForDockerStop(
+        ContainerExecutor.Signal.TERM);
     Assert.assertEquals(4, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
     Assert.assertEquals("  docker-command=stop", dockerCommands.get(1));
@@ -1370,11 +1375,54 @@ public class TestDockerContainerRuntime {
 
   @Test
   public void testDockerStopOnKillSignalWhenRunning()
+      throws ContainerExecutionException, PrivilegedOperationException {
+    List<String> dockerCommands = getDockerCommandsForSignal(
+        ContainerExecutor.Signal.KILL);
+    Assert.assertEquals(5, dockerCommands.size());
+    Assert.assertEquals(runAsUser, dockerCommands.get(0));
+    Assert.assertEquals(user, dockerCommands.get(1));
+    Assert.assertEquals(
+        Integer.toString(PrivilegedOperation.RunAsUserCommand
+        .SIGNAL_CONTAINER.getValue()),
+        dockerCommands.get(2));
+    Assert.assertEquals(signalPid, dockerCommands.get(3));
+    Assert.assertEquals(
+        Integer.toString(ContainerExecutor.Signal.KILL.getValue()),
+        dockerCommands.get(4));
+  }
+
+  @Test
+  public void testDockerKillOnQuitSignalWhenRunning() throws Exception {
+    List<String> dockerCommands = getDockerCommandsForSignal(
+        ContainerExecutor.Signal.QUIT);
+
+    Assert.assertEquals(5, dockerCommands.size());
+    Assert.assertEquals(runAsUser, dockerCommands.get(0));
+    Assert.assertEquals(user, dockerCommands.get(1));
+    Assert.assertEquals(
+        Integer.toString(PrivilegedOperation.RunAsUserCommand
+        .SIGNAL_CONTAINER.getValue()),
+        dockerCommands.get(2));
+    Assert.assertEquals(signalPid, dockerCommands.get(3));
+    Assert.assertEquals(
+        Integer.toString(ContainerExecutor.Signal.QUIT.getValue()),
+        dockerCommands.get(4));
+  }
+
+  @Test
+  public void testDockerStopOnTermSignalWhenRunningPrivileged()
       throws ContainerExecutionException, PrivilegedOperationException,
       IOException {
-    List<String> dockerCommands = getDockerCommandsForSignal(
-        ContainerExecutor.Signal.KILL,
-        DockerCommandExecutor.DockerContainerStatus.RUNNING);
+    conf.set(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS, "true");
+    conf.set(YarnConfiguration.NM_DOCKER_PRIVILEGED_CONTAINERS_ACL,
+        submittingUser);
+    env.put(ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
+    when(mockExecutor
+        .executePrivilegedOperation(anyList(), any(PrivilegedOperation.class),
+        any(File.class), anyMap(), anyBoolean(), anyBoolean())).thenReturn(
+        DockerCommandExecutor.DockerContainerStatus.RUNNING.getName());
+    List<String> dockerCommands = getDockerCommandsForDockerStop(
+        ContainerExecutor.Signal.TERM);
     Assert.assertEquals(4, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
     Assert.assertEquals("  docker-command=stop", dockerCommands.get(1));
@@ -1385,10 +1433,42 @@ public class TestDockerContainerRuntime {
   }
 
   @Test
-  public void testDockerKillOnQuitSignalWhenRunning() throws Exception {
-    List<String> dockerCommands = getDockerCommandsForSignal(
-        ContainerExecutor.Signal.QUIT,
-        DockerCommandExecutor.DockerContainerStatus.RUNNING);
+  public void testDockerStopOnKillSignalWhenRunningPrivileged()
+      throws ContainerExecutionException, PrivilegedOperationException,
+      IOException {
+    conf.set(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS, "true");
+    conf.set(YarnConfiguration.NM_DOCKER_PRIVILEGED_CONTAINERS_ACL,
+        submittingUser);
+    env.put(ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
+    when(mockExecutor
+        .executePrivilegedOperation(anyList(), any(PrivilegedOperation.class),
+        any(File.class), anyMap(), anyBoolean(), anyBoolean())).thenReturn(
+        DockerCommandExecutor.DockerContainerStatus.RUNNING.getName());
+    List<String> dockerCommands = getDockerCommandsForDockerStop(
+        ContainerExecutor.Signal.KILL);
+    Assert.assertEquals(4, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
+    Assert.assertEquals("  docker-command=kill", dockerCommands.get(1));
+    Assert.assertEquals(
+        "  name=container_e11_1518975676334_14532816_01_000001",
+        dockerCommands.get(2));
+    Assert.assertEquals("  signal=KILL", dockerCommands.get(3));
+  }
+
+  @Test
+  public void testDockerKillOnQuitSignalWhenRunningPrivileged()
+      throws Exception {
+    conf.set(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS, "true");
+    conf.set(YarnConfiguration.NM_DOCKER_PRIVILEGED_CONTAINERS_ACL,
+        submittingUser);
+    env.put(ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
+    when(mockExecutor
+        .executePrivilegedOperation(anyList(), any(PrivilegedOperation.class),
+        any(File.class), anyMap(), anyBoolean(), anyBoolean())).thenReturn(
+        DockerCommandExecutor.DockerContainerStatus.RUNNING.getName());
+    List<String> dockerCommands = getDockerCommandsForDockerStop(
+        ContainerExecutor.Signal.QUIT);
+
     Assert.assertEquals(4, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]", dockerCommands.get(0));
     Assert.assertEquals("  docker-command=kill", dockerCommands.get(1));
@@ -1403,8 +1483,8 @@ public class TestDockerContainerRuntime {
     env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_DELAYED_REMOVAL,
         "false");
     conf.set(YarnConfiguration.NM_DOCKER_ALLOW_DELAYED_REMOVAL, "true");
-    MockRuntime runtime = new MockRuntime(mockExecutor,
-        DockerCommandExecutor.DockerContainerStatus.EXITED, true);
+    DockerLinuxContainerRuntime runtime =
+        new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
     builder.setExecutionAttribute(RUN_AS_USER, runAsUser)
         .setExecutionAttribute(USER, user);
     runtime.initialize(enableMockContainerExecutor(conf), null);
@@ -1420,8 +1500,8 @@ public class TestDockerContainerRuntime {
     env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_DELAYED_REMOVAL,
         "true");
     conf.set(YarnConfiguration.NM_DOCKER_ALLOW_DELAYED_REMOVAL, "true");
-    MockRuntime runtime = new MockRuntime(mockExecutor,
-        DockerCommandExecutor.DockerContainerStatus.EXITED, true);
+    DockerLinuxContainerRuntime runtime =
+        new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
     builder.setExecutionAttribute(RUN_AS_USER, runAsUser)
         .setExecutionAttribute(USER, user);
     runtime.initialize(enableMockContainerExecutor(conf), null);
@@ -1431,21 +1511,21 @@ public class TestDockerContainerRuntime {
             File.class), anyMap(), anyBoolean(), anyBoolean());
   }
 
-  private List<String> getDockerCommandsForSignal(
-      ContainerExecutor.Signal signal,
-      DockerCommandExecutor.DockerContainerStatus status)
+  private List<String> getDockerCommandsForDockerStop(
+      ContainerExecutor.Signal signal)
       throws ContainerExecutionException, PrivilegedOperationException,
       IOException {
 
-    MockRuntime runtime = new MockRuntime(mockExecutor, status, false);
+    DockerLinuxContainerRuntime runtime =
+        new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
     builder.setExecutionAttribute(RUN_AS_USER, runAsUser)
         .setExecutionAttribute(USER, user)
         .setExecutionAttribute(PID, signalPid)
         .setExecutionAttribute(SIGNAL, signal);
-    runtime.initialize(enableMockContainerExecutor(conf), null);
+    runtime.initialize(enableMockContainerExecutor(conf), nmContext);
     runtime.signalContainer(builder.build());
 
-    PrivilegedOperation op = capturePrivilegedOperation();
+    PrivilegedOperation op = capturePrivilegedOperation(2);
     Assert.assertEquals(op.getOperationType(),
         PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
     String dockerCommandFile = op.getArguments().get(0);
@@ -1453,6 +1533,25 @@ public class TestDockerContainerRuntime {
         Charset.forName("UTF-8"));
   }
 
+  private List<String> getDockerCommandsForSignal(
+      ContainerExecutor.Signal signal)
+      throws ContainerExecutionException, PrivilegedOperationException {
+
+    DockerLinuxContainerRuntime runtime =
+        new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
+    builder.setExecutionAttribute(RUN_AS_USER, runAsUser)
+        .setExecutionAttribute(USER, user)
+        .setExecutionAttribute(PID, signalPid)
+        .setExecutionAttribute(SIGNAL, signal);
+    runtime.initialize(enableMockContainerExecutor(conf), null);
+    runtime.signalContainer(builder.build());
+
+    PrivilegedOperation op = capturePrivilegedOperation();
+    Assert.assertEquals(op.getOperationType(),
+        PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
+    return op.getArguments();
+  }
+
   /**
    * Return a configuration object with the mock container executor binary
    * preconfigured.
@@ -1937,12 +2036,16 @@ public class TestDockerContainerRuntime {
   public void testDockerContainerRelaunch()
       throws ContainerExecutionException, PrivilegedOperationException,
       IOException {
-    DockerLinuxContainerRuntime runtime = new MockRuntime(mockExecutor,
-        DockerCommandExecutor.DockerContainerStatus.EXITED, false);
-    runtime.initialize(conf, null);
+    DockerLinuxContainerRuntime runtime =
+        new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
+    when(mockExecutor
+        .executePrivilegedOperation(anyList(), any(PrivilegedOperation.class),
+        any(File.class), anyMap(), anyBoolean(), anyBoolean())).thenReturn(
+        DockerCommandExecutor.DockerContainerStatus.STOPPED.getName());
+    runtime.initialize(conf, nmContext);
     runtime.relaunchContainer(builder.build());
 
-    PrivilegedOperation op = capturePrivilegedOperation();
+    PrivilegedOperation op = capturePrivilegedOperation(2);
     List<String> args = op.getArguments();
     String dockerCommandFile = args.get(11);
 
@@ -1960,134 +2063,4 @@ public class TestDockerContainerRuntime {
         "  name=container_e11_1518975676334_14532816_01_000001",
         dockerCommands.get(counter));
   }
-
-  class MockRuntime extends DockerLinuxContainerRuntime {
-
-    private PrivilegedOperationExecutor privilegedOperationExecutor;
-    private DockerCommandExecutor.DockerContainerStatus containerStatus;
-    private boolean delayedRemovalAllowed;
-
-    MockRuntime(PrivilegedOperationExecutor privilegedOperationExecutor,
-        DockerCommandExecutor.DockerContainerStatus containerStatus,
-        boolean delayedRemovalAllowed) {
-      super(privilegedOperationExecutor);
-      this.privilegedOperationExecutor = privilegedOperationExecutor;
-      this.containerStatus = containerStatus;
-      this.delayedRemovalAllowed = delayedRemovalAllowed;
-    }
-
-    @Override
-    public void signalContainer(ContainerRuntimeContext ctx)
-        throws ContainerExecutionException {
-      ContainerExecutor.Signal signal = ctx.getExecutionAttribute(SIGNAL);
-      String containerName = ctx.getContainer().getContainerId().toString();
-      Map<String, String> environment =
-          ctx.getContainer().getLaunchContext().getEnvironment();
-      try {
-        if (ContainerExecutor.Signal.KILL.equals(signal)
-            || ContainerExecutor.Signal.TERM.equals(signal)) {
-          if (DockerCommandExecutor.isStoppable(containerStatus)) {
-            DockerStopCommand dockerStopCommand =
-                new DockerStopCommand(containerName)
-                .setGracePeriod(dockerStopGracePeriod);
-            DockerCommandExecutor.executeDockerCommand(dockerStopCommand,
-                containerName, environment, conf, mockExecutor, false,
-                nmContext);
-          }
-        } else {
-          if (DockerCommandExecutor.isKillable(containerStatus)) {
-            DockerKillCommand dockerKillCommand =
-                new DockerKillCommand(containerName);
-            dockerKillCommand.setSignal(signal.name());
-            DockerCommandExecutor.executeDockerCommand(dockerKillCommand,
-                containerName, environment, conf, mockExecutor, false,
-                nmContext);
-          }
-        }
-      } catch (ContainerExecutionException e) {
-        LOG.warn("Signal docker container failed. Exception: ", e);
-        throw new ContainerExecutionException("Signal docker container failed",
-            e.getExitCode(), e.getOutput(), e.getErrorOutput());
-      }
-    }
-
-    @Override
-    public void reapContainer(ContainerRuntimeContext ctx)
-        throws ContainerExecutionException {
-      String delayedRemoval = env.get(ENV_DOCKER_CONTAINER_DELAYED_REMOVAL);
-      if (delayedRemovalAllowed && delayedRemoval != null
-          && delayedRemoval.equalsIgnoreCase("true")) {
-        LOG.info("Delayed removal requested and allowed, skipping removal - "
-            + containerId);
-      } else {
-        if (DockerCommandExecutor.isRemovable(containerStatus)) {
-          DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId);
-          DockerCommandExecutor
-              .executeDockerCommand(dockerRmCommand, containerId, env, conf,
-                  privilegedOperationExecutor, false, nmContext);
-        }
-      }
-    }
-
-    @Override
-    public void relaunchContainer(ContainerRuntimeContext ctx)
-        throws ContainerExecutionException {
-      if (DockerCommandExecutor.isRemovable(containerStatus)) {
-        String relaunchContainerIdStr =
-            ctx.getContainer().getContainerId().toString();
-        DockerStartCommand startCommand =
-            new DockerStartCommand(containerIdStr);
-        DockerClient dockerClient = new DockerClient(conf);
-        String commandFile = dockerClient.writeCommandToTempFile(startCommand,
-            relaunchContainerIdStr);
-        String relaunchRunAsUser = ctx.getExecutionAttribute(RUN_AS_USER);
-        Path relaunchNmPrivateContainerScriptPath = ctx.getExecutionAttribute(
-            NM_PRIVATE_CONTAINER_SCRIPT_PATH);
-        Path relaunchContainerWorkDir =
-            ctx.getExecutionAttribute(CONTAINER_WORK_DIR);
-        //we can't do better here thanks to type-erasure
-        @SuppressWarnings("unchecked")
-        List<String> relaunchLocalDirs = ctx.getExecutionAttribute(LOCAL_DIRS);
-        @SuppressWarnings("unchecked")
-        List<String> relaunchLogDirs = ctx.getExecutionAttribute(LOG_DIRS);
-        String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS);
-
-        PrivilegedOperation launchOp = new PrivilegedOperation(
-            PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER);
-
-        launchOp.appendArgs(relaunchRunAsUser, ctx.getExecutionAttribute(USER),
-            Integer.toString(PrivilegedOperation
-                .RunAsUserCommand.LAUNCH_DOCKER_CONTAINER.getValue()),
-            ctx.getExecutionAttribute(APPID),
-            relaunchContainerIdStr,
-            relaunchContainerWorkDir.toString(),
-            relaunchNmPrivateContainerScriptPath.toUri().getPath(),
-            ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH).toUri().getPath(),
-            ctx.getExecutionAttribute(PID_FILE_PATH).toString(),
-            StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
-                relaunchLocalDirs),
-            StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
-                relaunchLogDirs),
-            commandFile,
-            resourcesOpts);
-
-        String tcCommandFile = ctx.getExecutionAttribute(TC_COMMAND_FILE);
-
-        if (tcCommandFile != null) {
-          launchOp.appendArgs(tcCommandFile);
-        }
-
-        try {
-          privilegedOperationExecutor.executePrivilegedOperation(null,
-              launchOp, null, null, false, false);
-        } catch (PrivilegedOperationException e) {
-          LOG.warn("Relaunch container failed. Exception: ", e);
-          LOG.info("Docker command used: " + startCommand);
-
-          throw new ContainerExecutionException("Launch container failed", e
-              .getExitCode(), e.getOutput(), e.getErrorOutput());
-        }
-      }
-    }
-  }
 }
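
Aside for readers skimming the patch: the refactoring above replaces the
MockRuntime subclass with plain Mockito stubbing plus an overloaded capture
helper. Below is a minimal, self-contained sketch of that capture-then-reset
pattern; the Executor interface is an illustrative stand-in, not the real
PrivilegedOperationExecutor.

    import static org.mockito.Mockito.anyList;
    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.reset;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;
    import static org.mockito.Mockito.when;

    import java.util.Arrays;
    import java.util.List;
    import org.mockito.ArgumentCaptor;

    public class CaptureSketch {
      /** Stand-in for PrivilegedOperationExecutor; deliberately small. */
      interface Executor {
        String run(List<String> prefix, String op);
      }

      static String captureLastOp(Executor mockExecutor, int invocations) {
        ArgumentCaptor<String> opCaptor =
            ArgumentCaptor.forClass(String.class);
        // Verify the expected call count while capturing each op argument.
        verify(mockExecutor, times(invocations))
            .run(anyList(), opCaptor.capture());
        // Reset so later verifications in the same test start from zero.
        reset(mockExecutor);
        // getValue() returns the most recently captured argument.
        return opCaptor.getValue();
      }

      public static void main(String[] args) {
        Executor exec = mock(Executor.class);
        // Stub the status lookup instead of subclassing the runtime, as
        // the patch does with DockerContainerStatus.RUNNING.getName().
        when(exec.run(anyList(), anyString())).thenReturn("running");
        exec.run(Arrays.asList("run-as"), "inspect");
        exec.run(Arrays.asList("run-as"), "stop");
        System.out.println(captureLastOp(exec, 2)); // prints "stop"
      }
    }

Capturing with times(2) and taking getValue() yields the second operation,
which appears to be why getDockerCommandsForDockerStop above can pass 2 and
still isolate the docker command that follows the initial status inspect.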




[12/50] [abbrv] hadoop git commit: HDFS-13560. Insufficient system resources exist to complete the requested service for some tests on Windows. Contributed by Anbang Hu.

Posted by ar...@apache.org.
HDFS-13560. Insufficient system resources exist to complete the requested service for some tests on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53b807a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53b807a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53b807a6

Branch: refs/heads/HDDS-48
Commit: 53b807a6a8486cefe0b036f7893de9f619bd44a1
Parents: a97a204
Author: Inigo Goiri <in...@apache.org>
Authored: Thu May 17 17:03:23 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu May 17 17:03:23 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/util/Shell.java  | 15 +++++++++++++++
 .../hdfs/server/datanode/TestDirectoryScanner.java   |  3 ++-
 .../datanode/fsdataset/impl/LazyPersistTestCase.java |  3 ++-
 .../hdfs/server/namenode/TestNameNodeMXBean.java     |  7 +++++--
 4 files changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index c25cba2..04b4b4f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -1388,4 +1388,19 @@ public abstract class Shell {
       return new HashSet<>(CHILD_SHELLS.keySet());
     }
   }
+
+  /**
+   * Static method to return the memory lock limit for datanode.
+   * @param ulimit max value at which memory locked should be capped.
+   * @return long value specifying the memory lock limit.
+   */
+  public static Long getMemlockLimit(Long ulimit) {
+    if (WINDOWS) {
+      // HDFS-13560: if ulimit is too large on Windows, Windows will complain
+      // "1450: Insufficient system resources exist to complete the requested
+      // service". Thus, cap Windows memory lock limit at Integer.MAX_VALUE.
+      return Math.min(Integer.MAX_VALUE, ulimit);
+    }
+    return ulimit;
+  }
 }
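
The new helper is small enough to reason about in isolation. A standalone
sketch of the same capping behavior follows; WINDOWS is hard-coded for the
demo, whereas the real method consults Shell.WINDOWS.

    public class MemlockCapDemo {
      // Hard-coded assumption for the demo; Shell checks the platform.
      static final boolean WINDOWS = true;

      static Long getMemlockLimit(Long ulimit) {
        // Math.min(long, long): Integer.MAX_VALUE widens to long here.
        return WINDOWS ? Math.min(Integer.MAX_VALUE, ulimit) : ulimit;
      }

      public static void main(String[] args) {
        // Long.MAX_VALUE is capped to 2147483647 so Windows no longer
        // fails with error 1450; smaller values pass through unchanged.
        System.out.println(getMemlockLimit(Long.MAX_VALUE)); // 2147483647
        System.out.println(getMemlockLimit(1024L));          // 1024
      }
    }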

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index c95c71b..f792523 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
@@ -99,7 +100,7 @@ public class TestDirectoryScanner {
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-                 Long.MAX_VALUE);
+                 getMemlockLimit(Long.MAX_VALUE));
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index c412dad..aae59dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -25,6 +25,7 @@ import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
@@ -421,7 +422,7 @@ public abstract class LazyPersistTestCase {
     private StorageType[] storageTypes = null;
     private int ramDiskReplicaCapacity = -1;
     private long ramDiskStorageLimit = -1;
-    private long maxLockedMemory = Long.MAX_VALUE;
+    private long maxLockedMemory = getMemlockLimit(Long.MAX_VALUE);
     private boolean hasTransientStorage = true;
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b807a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 9c165d8..3728420 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -75,6 +75,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -103,8 +104,10 @@ public class TestNameNodeMXBean {
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
+    Long maxLockedMemory = getMemlockLimit(
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-      NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+        maxLockedMemory);
     MiniDFSCluster cluster = null;
 
     try {
@@ -256,7 +259,7 @@ public class TestNameNodeMXBean {
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+      assertEquals(maxLockedMemory *
           cluster.getDataNodes().size(),
               mbs.getAttribute(mxbeanName, "CacheCapacity"));
       assertNull("RollingUpgradeInfo should be null when there is no rolling"




[33/50] [abbrv] hadoop git commit: Skip the proxy user check if the ugi has not been initialized. Contributed by Daryn Sharp

Posted by ar...@apache.org.
Skip the proxy user check if the ugi has not been initialized. Contributed by Daryn Sharp


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73e9120a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73e9120a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73e9120a

Branch: refs/heads/HDDS-48
Commit: 73e9120ad79c73703de21e0084591861813f3279
Parents: f48fec8
Author: Rushabh Shah <sh...@apache.org>
Authored: Mon May 21 12:33:00 2018 -0500
Committer: Rushabh Shah <sh...@apache.org>
Committed: Mon May 21 12:33:00 2018 -0500

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/conf/Configuration.java  | 2 +-
 .../org/apache/hadoop/security/UserGroupInformation.java     | 8 ++++++--
 2 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e9120a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index f1e2a9d..52f20b0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -281,7 +281,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
 
     private static boolean getRestrictParserDefault(Object resource) {
-      if (resource instanceof String) {
+      if (resource instanceof String || !UserGroupInformation.isInitialized()) {
         return false;
       }
       UserGroupInformation user;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e9120a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index cb132b3..3872810 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -288,14 +288,18 @@ public class UserGroupInformation {
   public static final String HADOOP_TOKEN_FILE_LOCATION = 
     "HADOOP_TOKEN_FILE_LOCATION";
   
+  public static boolean isInitialized() {
+    return conf != null;
+  }
+
   /** 
    * A method to initialize the fields that depend on a configuration.
    * Must be called before useKerberos or groups is used.
    */
   private static void ensureInitialized() {
-    if (conf == null) {
+    if (!isInitialized()) {
       synchronized(UserGroupInformation.class) {
-        if (conf == null) { // someone might have beat us
+        if (!isInitialized()) { // someone might have beat us
           initialize(new Configuration(), false);
         }
       }
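
Aside: a compact sketch of the guard this patch adds, with a generic holder
standing in for UserGroupInformation; the volatile field follows the
standard double-checked-locking idiom.

    public class LazyConfHolder {
      private static volatile Object conf; // stand-in for a Configuration

      public static boolean isInitialized() {
        return conf != null;
      }

      private static void ensureInitialized() {
        if (!isInitialized()) {
          synchronized (LazyConfHolder.class) {
            if (!isInitialized()) { // someone might have beat us to it
              conf = new Object();
            }
          }
        }
      }

      // The point of the fix to getRestrictParserDefault above: callers
      // needing only a best-effort answer bail out here instead of
      // triggering full initialization as a side effect.
      public static boolean restrictParserDefault() {
        if (!isInitialized()) {
          return false;
        }
        ensureInitialized();
        return true; // stand-in for the real proxy-user lookup
      }
    }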




[21/50] [abbrv] hadoop git commit: HDFS-13593. TestBlockReaderLocalLegacy#testBlockReaderLocalLegacyWithAppend fails on Windows. Contributed by Anbang Hu.

Posted by ar...@apache.org.
HDFS-13593. TestBlockReaderLocalLegacy#testBlockReaderLocalLegacyWithAppend fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9775ecb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9775ecb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9775ecb2

Branch: refs/heads/HDDS-48
Commit: 9775ecb2355d7bed3514fcd54bf69e8351c4ab99
Parents: 57b893d
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 18 09:46:02 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 18 09:46:02 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java   | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9775ecb2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
index 273619c..285cdb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.BeforeClass;
@@ -93,8 +94,9 @@ public class TestBlockReaderLocalLegacy {
     final long FILE_LENGTH = 512L;
 
     HdfsConfiguration conf = getConfiguration(null);
+    File basedir = new File(GenericTestUtils.getRandomizedTempPath());
     MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+        new MiniDFSCluster.Builder(conf, basedir).numDataNodes(1).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
 
@@ -173,8 +175,9 @@ public class TestBlockReaderLocalLegacy {
     final HdfsConfiguration conf = getConfiguration(null);
     conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
 
+    File basedir = new File(GenericTestUtils.getRandomizedTempPath());
     final MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+        new MiniDFSCluster.Builder(conf, basedir).numDataNodes(1).build();
     cluster.waitActive();
 
     final DistributedFileSystem dfs = cluster.getFileSystem();
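
The shape of the fix, reduced to a sketch using the same APIs the diff
touches (the test body is elided):

    import java.io.File;

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.test.GenericTestUtils;

    public class RandomizedBaseDirSketch {
      public static void main(String[] args) throws Exception {
        HdfsConfiguration conf = new HdfsConfiguration();
        // A per-run temp path gives each MiniDFSCluster its own storage
        // directory, avoiding collisions between test runs.
        File basedir = new File(GenericTestUtils.getRandomizedTempPath());
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf, basedir).numDataNodes(1)
                .build();
        cluster.waitActive();
        // ... exercise the cluster ...
        cluster.shutdown();
      }
    }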




[08/50] [abbrv] hadoop git commit: HDFS-13554. TestDatanodeRegistration#testForcedRegistration does not shut down cluster. Contributed by Anbang Hu.

Posted by ar...@apache.org.
HDFS-13554. TestDatanodeRegistration#testForcedRegistration does not shut down cluster. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65476458
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65476458
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65476458

Branch: refs/heads/HDDS-48
Commit: 65476458fa05656010809be632356e4015b59a17
Parents: d45a0b7
Author: Inigo Goiri <in...@apache.org>
Authored: Thu May 17 14:48:04 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Thu May 17 14:48:04 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/TestDatanodeRegistration.java   | 149 ++++++++++---------
 1 file changed, 78 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65476458/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index b6ae281..6421e8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -330,86 +330,93 @@ public class TestDatanodeRegistration {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 4);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, Integer.MAX_VALUE);
 
-    final MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-    cluster.waitActive();
-    cluster.getHttpUri(0);
-    FSNamesystem fsn = cluster.getNamesystem();
-    String bpId = fsn.getBlockPoolId();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      cluster.getHttpUri(0);
+      FSNamesystem fsn = cluster.getNamesystem();
+      String bpId = fsn.getBlockPoolId();
 
-    DataNode dn = cluster.getDataNodes().get(0);
-    DatanodeDescriptor dnd =
-        NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
-    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
-    DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
+      DataNode dn = cluster.getDataNodes().get(0);
+      DatanodeDescriptor dnd =
+          NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+      DatanodeStorageInfo storage = dnd.getStorageInfos()[0];
 
-    // registration should not change after heartbeat.
-    assertTrue(dnd.isRegistered());
-    DatanodeRegistration lastReg = dn.getDNRegistrationForBP(bpId);
-    waitForHeartbeat(dn, dnd);
-    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      // registration should not change after heartbeat.
+      assertTrue(dnd.isRegistered());
+      DatanodeRegistration lastReg = dn.getDNRegistrationForBP(bpId);
+      waitForHeartbeat(dn, dnd);
+      assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
-    // force a re-registration on next heartbeat.
-    dnd.setForceRegistration(true);
-    assertFalse(dnd.isRegistered());
-    waitForHeartbeat(dn, dnd);
-    assertTrue(dnd.isRegistered());
-    DatanodeRegistration newReg = dn.getDNRegistrationForBP(bpId);
-    assertNotSame(lastReg, newReg);
-    lastReg = newReg;
+      // force a re-registration on next heartbeat.
+      dnd.setForceRegistration(true);
+      assertFalse(dnd.isRegistered());
+      waitForHeartbeat(dn, dnd);
+      assertTrue(dnd.isRegistered());
+      DatanodeRegistration newReg = dn.getDNRegistrationForBP(bpId);
+      assertNotSame(lastReg, newReg);
+      lastReg = newReg;
 
-    // registration should not change on subsequent heartbeats.
-    waitForHeartbeat(dn, dnd);
-    assertTrue(dnd.isRegistered());
-    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
-    assertTrue(waitForBlockReport(dn, dnd));
-    assertTrue(dnd.isRegistered());
-    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      // registration should not change on subsequent heartbeats.
+      waitForHeartbeat(dn, dnd);
+      assertTrue(dnd.isRegistered());
+      assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      assertTrue(waitForBlockReport(dn, dnd));
+      assertTrue(dnd.isRegistered());
+      assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
-    // check that block report is not processed and registration didn't change.
-    dnd.setForceRegistration(true);
-    assertFalse(waitForBlockReport(dn, dnd));
-    assertFalse(dnd.isRegistered());
-    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      // check that block report is not processed and registration didn't
+      // change.
+      dnd.setForceRegistration(true);
+      assertFalse(waitForBlockReport(dn, dnd));
+      assertFalse(dnd.isRegistered());
+      assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
-    // heartbeat should trigger re-registration, and next block report should
-    // not change registration.
-    waitForHeartbeat(dn, dnd);
-    assertTrue(dnd.isRegistered());
-    newReg = dn.getDNRegistrationForBP(bpId);
-    assertNotSame(lastReg, newReg);
-    lastReg = newReg;
-    assertTrue(waitForBlockReport(dn, dnd));
-    assertTrue(dnd.isRegistered());
-    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      // heartbeat should trigger re-registration, and next block report
+      // should not change registration.
+      waitForHeartbeat(dn, dnd);
+      assertTrue(dnd.isRegistered());
+      newReg = dn.getDNRegistrationForBP(bpId);
+      assertNotSame(lastReg, newReg);
+      lastReg = newReg;
+      assertTrue(waitForBlockReport(dn, dnd));
+      assertTrue(dnd.isRegistered());
+      assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
-    // registration doesn't change.
-    ExtendedBlock eb = new ExtendedBlock(bpId, 1234);
-    dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
-    DataNodeTestUtils.triggerDeletionReport(dn);
-    assertTrue(dnd.isRegistered());
-    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      // registration doesn't change.
+      ExtendedBlock eb = new ExtendedBlock(bpId, 1234);
+      dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
+      DataNodeTestUtils.triggerDeletionReport(dn);
+      assertTrue(dnd.isRegistered());
+      assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
-    // a failed IBR will effectively unregister the node.
-    boolean failed = false;
-    try {
-      // pass null to cause a failure since there aren't any easy failure
-      // modes since it shouldn't happen.
-      fsn.processIncrementalBlockReport(lastReg, null);
-    } catch (NullPointerException npe) {
-      failed = true;
-    }
-    assertTrue("didn't fail", failed);
-    assertFalse(dnd.isRegistered());
+      // a failed IBR will effectively unregister the node.
+      boolean failed = false;
+      try {
+        // pass null to cause a failure since there aren't any easy failure
+        // modes since it shouldn't happen.
+        fsn.processIncrementalBlockReport(lastReg, null);
+      } catch (NullPointerException npe) {
+        failed = true;
+      }
+      assertTrue("didn't fail", failed);
+      assertFalse(dnd.isRegistered());
 
-    // should remain unregistered until next heartbeat.
-    dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
-    DataNodeTestUtils.triggerDeletionReport(dn);
-    assertFalse(dnd.isRegistered());
-    assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
-    waitForHeartbeat(dn, dnd);
-    assertTrue(dnd.isRegistered());
-    assertNotSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      // should remain unregistered until next heartbeat.
+      dn.notifyNamenodeDeletedBlock(eb, storage.getStorageID());
+      DataNodeTestUtils.triggerDeletionReport(dn);
+      assertFalse(dnd.isRegistered());
+      assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
+      waitForHeartbeat(dn, dnd);
+      assertTrue(dnd.isRegistered());
+      assertNotSame(lastReg, dn.getDNRegistrationForBP(bpId));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
   }
 
   private void waitForHeartbeat(final DataNode dn, final DatanodeDescriptor dnd)
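
The whole patch boils down to one pattern: build the cluster inside try and
shut it down in finally, so a failing assertion can no longer leak the
cluster into later tests. A minimal sketch with the same Hadoop test APIs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ShutdownInFinallySketch {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = null;
        try {
          cluster = new MiniDFSCluster.Builder(new Configuration())
              .numDataNodes(1).build();
          cluster.waitActive();
          // ... assertions; any failure still reaches the finally block
        } finally {
          if (cluster != null) {
            cluster.shutdown();
          }
        }
      }
    }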




[43/50] [abbrv] hadoop git commit: HDDS-89. Create ozone specific inline documentation as part of the build. Contributed by Elek, Marton.

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
new file mode 100644
index 0000000..1413fc6
Binary files /dev/null and b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
new file mode 100644
index 0000000..9e61285
Binary files /dev/null and b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2 b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
new file mode 100644
index 0000000..64539b5
Binary files /dev/null and b/hadoop-ozone/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481bfdb9/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js b/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js
new file mode 100644
index 0000000..9bcd2fc
--- /dev/null
+++ b/hadoop-ozone/docs/themes/ozonedoc/static/js/bootstrap.min.js
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.3.7 (http://getbootstrap.com)
+ * Copyright 2011-2016 Twitter, Inc.
+ * Licensed under the MIT license
+ */
+if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1||b[0]>3)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){if(a(b
 .target).is(this))return b.handleObj.handler.apply(this,arguments)}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.7",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a("#"===f?[]:f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c
 ,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.7",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c).prop(c,!0)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c).prop(c,!1))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeCla
 ss("active"),this.$element.addClass("active")):"checkbox"==c.prop("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&&c.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target).closest(".btn");b.call(d,"toggle"),a(c.target).is('input[type="radio"], input[type="checkbox"]')||(c.preventDefault(),d.is("input,button")?d.trigger("focus"):d.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"us
 e strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();b
 reak;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));if(!(a>this.$items.length-1||a<0))return this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),thi
 s.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){if(!this.sliding)return this.slide("next")},c.prototype.prev=function(){if(!this.sliding)return this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offs
 etWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" ")).addClass("active"),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);
 b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle="collapse"][href="#'+b.id+'"],[data-toggle="collapse"][data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.7",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&
 &!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.b
 s.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expande
 d",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":e.data();c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function c(c){c&&3===c.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=b(d),f={relatedTarget:this};e.hasClass("open")&&(c&&"click"==c.type&&/input|textarea/i.test(c.target.tagName)&&a.contains(e[0],c.target)||(e.trigger(c=a.Event("hide.bs.dropdown",f)),c.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger(a.Event("hidden.bs.dropdown",f)))))}))}funct
 ion d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.7",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=b(e),g=f.hasClass("open");if(c(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",c);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger(a.Event("shown.bs.dropdown",h))}return!1}},g.prototype.keydown=function(c){if(/(38|40|27|32)/.test(c.which)&&!/input|textarea/i.test(c.target.tagName)){var d=a(this);if(c.preventDefault(),c.stopPropagation(),!d.is(".disabled, :disabled")){v
 ar e=b(d),g=e.hasClass("open");if(!g&&27!=c.which||g&&27==c.which)return 27==c.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.disabled):visible a",i=e.find(".dropdown-menu"+h);if(i.length){var j=i.index(c.target);38==c.which&&j>0&&j--,40==c.which&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",c).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f,g.prototype.keydown).on("keydown.bs.dropdown.data-api",".dropdown-menu",g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?
 f[b](d):g.show&&f.show(d)})}var c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$dialog=this.$element.find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=300,c.BACKDROP_TRANSITION_DURATION=150,c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var d=this,e=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(e),this.isShown||e.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dial
 og.on("mousedown.dismiss.bs.modal",function(){d.$element.one("mouseup.dismiss.bs.modal",function(b){a(b.target).is(d.$element)&&(d.ignoreBackdropClick=!0)})}),this.backdrop(function(){var e=a.support.transition&&d.$element.hasClass("fade");d.$element.parent().length||d.$element.appendTo(d.$body),d.$element.show().scrollTop(0),d.adjustDialog(),e&&d.$element[0].offsetWidth,d.$element.addClass("in"),d.enforceFocus();var f=a.Event("shown.bs.modal",{relatedTarget:b});e?d.$dialog.one("bsTransitionEnd",function(){d.$element.trigger("focus").trigger(f)}).emulateTransitionEnd(c.TRANSITION_DURATION):d.$element.trigger("focus").trigger(f)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.m
 odal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(c.TRANSITION_DURATION):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){document===a.target||this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},c.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$body.removeClass("modal-open"),a.resetAdjustments(),a.resetScrollbar(),a.$element.trigger("hidden.bs.modal")})
 },c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var d=this,e=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var f=a.support.transition&&e;if(this.$backdrop=a(document.createElement("div")).addClass("modal-backdrop "+e).appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){return this.ignoreBackdropClick?void(this.ignoreBackdropClick=!1):void(a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide()))},this)),f&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;f?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var g=function(){d.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",g).emulateTransiti
 onEnd(c.BACKDROP_TRANSITION_DURATION):g()}else b&&b()},c.prototype.handleUpdate=function(){this.adjustDialog()},c.prototype.adjustDialog=function(){var a=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth<a,this.scrollbarWidth=this.measureScrollbar()},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"",this.bodyIsOverflowing&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding
 -right",this.originalBodyPad)},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.tooltip",e=new c(this,f)),"
 string"==typeof b&&e[b]())})}var c=function(a,b){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=null,this.inState=null,this.init("tooltip",a,b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(a.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when initializing "+
 this.type+" on the window.document object!");for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){v
 ar c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusin"==b.type?"focus":"hover"]=!0),c.tip().hasClass("in")||"in"==c.hoverState?void(c.hoverState="in"):(clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.isInStateTrue=function(){for(var a in this.inState)if(this.inState[a])return!0;return!1},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);if(c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusout"==b.type?"focus":"hover"]=!1),!c.isInStateTrue())return clearTimeout(c.timeout),c.hoverState="out",c.option
 s.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var k=this.getPosition(),l=f[0].offsetWidth,m=f[0
 ].offsetHeight;if(j){var n=h,o=this.getPosition(this.$viewport);h="bottom"==h&&k.bottom+m>o.bottom?"top":"top"==h&&k.top-m<o.top?"bottom":"right"==h&&k.right+l>o.width?"left":"left"==h&&k.left-l<o.left?"right":h,f.removeClass(n).addClass(h)}var p=this.getCalculatedOffset(h,k,l,m);this.applyPlacement(p,h);var q=function(){var a=e.hoverState;e.$element.trigger("shown.bs."+e.type),e.hoverState=null,"out"==a&&e.leave(e)};a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",q).emulateTransitionEnd(c.TRANSITION_DURATION):q()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top+=g,b.left+=h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewport
 AdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=/top|bottom/.test(c),m=l?2*k.left-e+i:2*k.top-f+j,n=l?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(m,d[0][n],l)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c?"left":"top",50*(1-a/b)+"%").css(c?"top":"left","")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(b){function d(){"in"!=e.hoverState&&f.detach(),e.$element&&e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),b&&b()}var e=this,f=a(this.$tip),g=a.Event("hide.bs."+this.type);if(this.$element.trigger(g),!g.isDefaultPrevented())return f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",d).emulateTransitionEnd(c.TRANSITION_DURATION):d(),this.hoverState=null,this},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"s
 tring"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName,e=c.getBoundingClientRect();null==e.width&&(e=a.extend({},e,{width:e.right-e.left,height:e.bottom-e.top}));var f=window.SVGElement&&c instanceof window.SVGElement,g=d?{top:0,left:0}:f?null:b.offset(),h={scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop()},i=d?{width:a(window).width(),height:a(window).height()}:null;return a.extend({},e,h,i,g)},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)ret
 urn e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.right&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){if(!this.$tip&&(this.$tip=a(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){th
 is.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),b?(c.inState.click=!c.inState.click,c.isInStateTrue()?c.enter(c):c.leave(c)):c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$element.off("."+a.type).removeData("bs."+a.type),a.$tip&&a.$tip.detach(),a.$tip=null,a.$arrow=null,a.$viewport=null,a.$element=null})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}v
 ar c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.7",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=
 function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){this.$body=a(document.body),this.$scrollElement=a(a(c).is(document.body)?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",a.proxy(this.process,this)),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.7",b.DEFAULTS={offset:10},b.prototype.getScrollH
 eight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b=this,c="offset",d=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),a.isWindow(this.$scrollElement[0])||(c="position",d=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var b=a(this),e=b.data("target")||b.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[c]().top+d,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){b.offsets.push(this[0]),b.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<e[0])return this.activeTarget=null,this.
 clear();for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(void 0===e[a+1]||b<e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){
+this.activeTarget=b,this.clear();var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")},b.prototype.clear=function(){a(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("ta
 rget");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a"),f=a.Event("hide.bs.tab",{relatedTarget:b[0]}),g=a.Event("show.bs.tab",{relatedTarget:e[0]});if(e.trigger(f),b.trigger(g),!g.isDefaultPrevented()&&!f.isDefaultPrevented()){var h=a(d);this.activate(b.closest("li"),c),this.activate(h,h.parent(),function(){e.trigger({type:"hidden.bs.tab",relatedTarget:b[0]}),b.trigger({type:"shown.bs.tab",relatedTarget:e[0]})})}}},c.prototype.activate=function(b,d,e){function f(){g.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu").length&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&
 &a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkP
 osition()};c.VERSION="3.3.7",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=c&&"top"==this.affixed)return e<c&&"top";if("bottom"==this.affixed)return null!=c?!(e+this.unpin<=f.top)&&"bottom":!(e+g<=a-d)&&"bottom";var h=null==this.affixed,i=h?e:f.top,j=h?g:b;return null!=c&&e<=c?"top":null!=d&&i+j>=a-d&&"bottom"},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=Math.max(a(document).height(),a(document.body).height());"o
 bject"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix"+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery);
\ No newline at end of file




[27/50] [abbrv] hadoop git commit: YARN-7530. Refactored YARN service API project location. Contributed by Chandni Singh

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java
deleted file mode 100644
index f4acd94..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service.webapp;
-
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AuthenticationFilterInitializer;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
-import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
-import org.eclipse.jetty.webapp.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY;
-import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*;
-
-/**
- * This class launches the web service using Hadoop HttpServer2 (which uses
- * an embedded Jetty container). This is the entry point to the service.
- * The Java command used to launch this app should call the main method.
- */
-public class ApiServerWebApp extends AbstractService {
-  private static final Logger logger = LoggerFactory
-      .getLogger(ApiServerWebApp.class);
-  private static final String SEP = ";";
-
-  // REST API server for YARN native services
-  private HttpServer2 apiServer;
-  private InetSocketAddress bindAddress;
-
-  public static void main(String[] args) throws IOException {
-    ApiServerWebApp apiWebApp = new ApiServerWebApp();
-    try {
-      apiWebApp.init(new YarnConfiguration());
-      apiWebApp.serviceStart();
-    } catch (Exception e) {
-      logger.error("Got exception starting", e);
-      apiWebApp.close();
-    }
-  }
-
-  public ApiServerWebApp() {
-    super(ApiServerWebApp.class.getName());
-  }
-
-  @Override
-  protected void serviceStart() throws Exception {
-    bindAddress = getConfig().getSocketAddr(API_SERVER_ADDRESS,
-        DEFAULT_API_SERVER_ADDRESS, DEFAULT_API_SERVER_PORT);
-    logger.info("YARN API server running on " + bindAddress);
-    if (UserGroupInformation.isSecurityEnabled()) {
-      doSecureLogin(getConfig());
-    }
-    startWebApp();
-    super.serviceStart();
-  }
-
-  @Override
-  protected void serviceStop() throws Exception {
-    if (apiServer != null) {
-      apiServer.stop();
-    }
-    super.serviceStop();
-  }
-
-  private void doSecureLogin(org.apache.hadoop.conf.Configuration conf)
-      throws IOException {
-    SecurityUtil.login(conf, YarnConfiguration.RM_KEYTAB,
-        YarnConfiguration.RM_PRINCIPAL, bindAddress.getHostName());
-    addFilters(conf);
-  }
-
-  private void addFilters(org.apache.hadoop.conf.Configuration conf) {
-    // Always load the pseudo authentication filter to parse "user.name" in a
-    // URL to identify an HTTP request's user.
-    boolean hasHadoopAuthFilterInitializer = false;
-    String filterInitializerConfKey = "hadoop.http.filter.initializers";
-    Class<?>[] initializersClasses =
-        conf.getClasses(filterInitializerConfKey);
-    List<String> targets = new ArrayList<String>();
-    if (initializersClasses != null) {
-      for (Class<?> initializer : initializersClasses) {
-        if (initializer.getName().equals(
-            AuthenticationFilterInitializer.class.getName())) {
-          hasHadoopAuthFilterInitializer = true;
-          break;
-        }
-        targets.add(initializer.getName());
-      }
-    }
-    if (!hasHadoopAuthFilterInitializer) {
-      targets.add(AuthenticationFilterInitializer.class.getName());
-      conf.set(filterInitializerConfKey, StringUtils.join(",", targets));
-    }
-  }
-
-  private void startWebApp() throws IOException {
-    URI uri = URI.create("http://" + NetUtils.getHostPortString(bindAddress));
-
-    apiServer = new HttpServer2.Builder()
-        .setName("api-server")
-        .setConf(getConfig())
-        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
-        .setUsernameConfKey(RM_WEBAPP_SPNEGO_USER_NAME_KEY)
-        .setKeytabConfKey(RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
-        .addEndpoint(uri).build();
-
-    String apiPackages =
-        ApiServer.class.getPackage().getName() + SEP
-            + GenericExceptionHandler.class.getPackage().getName() + SEP
-            + YarnJacksonJaxbJsonProvider.class.getPackage().getName();
-    apiServer.addJerseyResourcePackage(apiPackages, "/*");
-
-    try {
-      logger.info("Service starting up. Logging start...");
-      apiServer.start();
-      logger.info("Server status = {}", apiServer.toString());
-      for (Configuration conf : apiServer.getWebAppContext()
-          .getConfigurations()) {
-        logger.info("Configurations = {}", conf);
-      }
-      logger.info("Context Path = {}", Collections.singletonList(
-          apiServer.getWebAppContext().getContextPath()));
-      logger.info("ResourceBase = {}", Collections.singletonList(
-          apiServer.getWebAppContext().getResourceBase()));
-      logger.info("War = {}", Collections
-          .singletonList(apiServer.getWebAppContext().getWar()));
-    } catch (Exception ex) {
-      logger.error("Hadoop HttpServer2 App **failed**", ex);
-      throw ex;
-    }
-  }
-}
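
For reference, the startup logic in the deleted class boils down to one pattern: build an HttpServer2 endpoint and register Jersey resource packages on it. The fragment below is a minimal sketch of that pattern, reusing only the builder and server calls visible in the diff above; the class name MiniApiServer, the server name "demo-api-server", and the hard-coded bind address are illustrative assumptions rather than part of the original code.

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class MiniApiServer {
  public static void main(String[] args) throws IOException {
    YarnConfiguration conf = new YarnConfiguration();

    // Illustrative bind address; the real class resolves API_SERVER_ADDRESS
    // from the configuration instead of hard-coding it.
    InetSocketAddress bindAddress = new InetSocketAddress("localhost", 9191);
    URI uri = URI.create("http://" + NetUtils.getHostPortString(bindAddress));

    // Same builder sequence as the deleted ApiServerWebApp#startWebApp().
    HttpServer2 apiServer = new HttpServer2.Builder()
        .setName("demo-api-server")
        .setConf(conf)
        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
        .addEndpoint(uri)
        .build();

    // Register JAX-RS resources by package name, as the original did.
    apiServer.addJerseyResourcePackage(
        "org.apache.hadoop.yarn.service.webapp", "/*");
    apiServer.start();
  }
}
```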

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java
deleted file mode 100644
index 1bdf05a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.service.webapp contains classes to be used
- * for YARN Services API.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.service.webapp;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
deleted file mode 100644
index b7ad6c9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ /dev/null
@@ -1,444 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-## Examples
-
-### Create a simple single-component service with most attribute values as defaults
-POST URL - http://localhost:8088/app/v1/services
-
-##### POST Request JSON
-```json
-{
-  "name": "hello-world",
-  "version": "1.0.0",
-  "description": "hello world example",
-  "components" :
-    [
-      {
-        "name": "hello",
-        "number_of_containers": 2,
-        "artifact": {
-          "id": "nginx:latest",
-          "type": "DOCKER"
-        },
-        "launch_command": "./start_nginx.sh",
-        "resource": {
-          "cpus": 1,
-          "memory": "256"
-        }
-      }
-    ]
-}
-```
-
-##### GET Response JSON
-GET URL - http://localhost:8088/app/v1/services/hello-world
-
-Note, a lifetime value of -1 means unlimited lifetime.
-
-```json
-{
-    "name": "hello-world",
-    "version": "1.0.0",
-    "description": "hello world example",
-    "id": "application_1503963985568_0002",
-    "lifetime": -1,
-    "state": "STABLE",
-    "components": [
-        {
-            "name": "hello",
-            "state": "STABLE",
-            "resource": {
-                "cpus": 1,
-                "memory": "256"
-            },
-            "configuration": {
-                "properties": {},
-                "env": {},
-                "files": []
-            },
-            "quicklinks": [],
-            "containers": [
-                {
-                    "id": "container_e03_1503963985568_0002_01_000002",
-                    "ip": "10.22.8.143",
-                    "hostname": "ctr-e03-1503963985568-0002-01-000002.example.site",
-                    "state": "READY",
-                    "launch_time": 1504051512412,
-                    "bare_host": "host100.cloud.com",
-                    "component_instance_name": "hello-0"
-                },
-                {
-                    "id": "container_e03_1503963985568_0002_01_000003",
-                    "ip": "10.22.8.144",
-                    "hostname": "ctr-e03-1503963985568-0002-01-000003.example.site",
-                    "state": "READY",
-                    "launch_time": 1504051536450,
-                    "bare_host": "host100.cloud.com",
-                    "component_instance_name": "hello-1"
-                }
-            ],
-            "launch_command": "./start_nginx.sh",
-            "number_of_containers": 1,
-            "run_privileged_container": false
-        }
-    ],
-    "configuration": {
-        "properties": {},
-        "env": {},
-        "files": []
-    },
-    "quicklinks": {}
-}
-
-```
-### Update to modify the lifetime of a service
-PUT URL - http://localhost:8088/app/v1/services/hello-world
-
-##### PUT Request JSON
-
-Note, irrespective of what the current lifetime value is, this update request will set the lifetime of the service to 3600 seconds (1 hour) from the time the request is submitted. Hence, whether a service has a remaining lifetime of 5 minutes and you want to extend it to an hour, or it has a remaining lifetime of 5 hours and you want to reduce it to an hour, you submit the same request below for both scenarios.
-
-```json
-{
-  "lifetime": 3600
-}
-```
-### Stop a service
-PUT URL - http://localhost:8088/app/v1/services/hello-world
-
-##### PUT Request JSON
-```json
-{
-  "state": "STOPPED"
-}
-```
-
-### Start a service
-PUT URL - http://localhost:8088/app/v1/services/hello-world
-
-##### PUT Request JSON
-```json
-{
-  "state": "STARTED"
-}
-```
-
-### Update to flex up/down the number of containers (instances) of a component of a service
-PUT URL - http://localhost:8088/app/v1/services/hello-world/components/hello
-
-##### PUT Request JSON
-```json
-{
-  "number_of_containers": 3
-}
-```
-
-Alternatively, you can specify the entire "components" section.
-
-PUT URL - http://localhost:8088/app/v1/services/hello-world
-##### PUT Request JSON
-```json
-{
-  "state": "FLEX",
-  "components" :
-    [
-      {
-        "name": "hello",
-        "number_of_containers": 3
-      }
-    ]
-}
-```
-
-### Destroy a service
-DELETE URL - http://localhost:8088/app/v1/services/hello-world
-
-***
-
-### Create a complicated service  - HBase
-POST URL - http://localhost:8088/app/v1/services
-
-##### POST Request JSON
-
-```json
-{
-  "name": "hbase-app-1",
-  "version": "1.0.0",
-  "description": "hbase service",
-  "lifetime": "3600",
-  "components": [
-    {
-      "name": "hbasemaster",
-      "number_of_containers": 1,
-      "artifact": {
-        "id": "hbase:latest",
-        "type": "DOCKER"
-      },
-      "launch_command": "/usr/hdp/current/hbase-master/bin/hbase master start",
-      "resource": {
-        "cpus": 1,
-        "memory": "2048"
-      },
-      "configuration": {
-        "env": {
-          "HBASE_LOG_DIR": "<LOG_DIR>"
-        },
-        "files": [
-          {
-            "type": "XML",
-            "dest_file": "/etc/hadoop/conf/core-site.xml",
-            "properties": {
-              "fs.defaultFS": "${CLUSTER_FS_URI}"
-            }
-          },
-          {
-            "type": "XML",
-            "dest_file": "/etc/hbase/conf/hbase-site.xml",
-            "properties": {
-              "hbase.cluster.distributed": "true",
-              "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
-              "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
-              "zookeeper.znode.parent": "${SERVICE_ZK_PATH}",
-              "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}",
-              "hbase.master.info.port": "16010"
-            }
-          }
-        ]
-      }
-    },
-    {
-      "name": "regionserver",
-      "number_of_containers": 3,
-      "artifact": {
-        "id": "hbase:latest",
-        "type": "DOCKER"
-      },
-      "launch_command": "/usr/hdp/current/hbase-regionserver/bin/hbase regionserver start",
-      "resource": {
-        "cpus": 1,
-        "memory": "2048"
-      },
-      "configuration": {
-        "env": {
-          "HBASE_LOG_DIR": "<LOG_DIR>"
-        },
-        "files": [
-          {
-            "type": "XML",
-            "dest_file": "/etc/hadoop/conf/core-site.xml",
-            "properties": {
-              "fs.defaultFS": "${CLUSTER_FS_URI}"
-            }
-          },
-          {
-            "type": "XML",
-            "dest_file": "/etc/hbase/conf/hbase-site.xml",
-            "properties": {
-              "hbase.cluster.distributed": "true",
-              "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
-              "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
-              "zookeeper.znode.parent": "${SERVICE_ZK_PATH}",
-              "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}",
-              "hbase.master.info.port": "16010",
-              "hbase.regionserver.hostname": "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}"
-            }
-          }
-        ]
-      }
-    }
-  ],
-  "quicklinks": {
-    "HBase Master Status UI": "http://hbasemaster0.${SERVICE_NAME}.${USER}.${DOMAIN}:16010/master-status",
-    "Proxied HBase Master Status UI": "http://app-proxy/${DOMAIN}/${USER}/${SERVICE_NAME}/hbasemaster/16010/"
-  }
-}
-```
-
-### Create a service requesting GPUs in addition to CPUs and RAM
-POST URL - http://localhost:8088/app/v1/services
-
-##### POST Request JSON
-```json
-{
-  "name": "hello-world",
-  "version": "1.0.0",
-  "description": "hello world example with GPUs",
-  "components" :
-    [
-      {
-        "name": "hello",
-        "number_of_containers": 2,
-        "artifact": {
-          "id": "nginx:latest",
-          "type": "DOCKER"
-        },
-        "launch_command": "./start_nginx.sh",
-        "resource": {
-          "cpus": 1,
-          "memory": "256",
-          "additional" : {
-            "yarn.io/gpu" : {
-              "value" : 4,
-              "unit" : ""
-            }
-          }
-        }
-      }
-    ]
-}
-```
-
-### Create a service with a component requesting anti-affinity placement policy
-POST URL - http://localhost:8088/app/v1/services
-
-##### POST Request JSON
-```json
-{
-  "name": "hello-world",
-  "version": "1.0.0",
-  "description": "hello world example with anti-affinity",
-  "components" :
-    [
-      {
-        "name": "hello",
-        "number_of_containers": 3,
-        "artifact": {
-          "id": "nginx:latest",
-          "type": "DOCKER"
-        },
-        "launch_command": "./start_nginx.sh",
-        "resource": {
-          "cpus": 1,
-          "memory": "256"
-        },
-        "placement_policy": {
-          "constraints": [
-            {
-              "type": "ANTI_AFFINITY",
-              "scope": "NODE",
-              "node_attributes": {
-                "os": ["linux", "windows"],
-                "fault_domain": ["fd1", "fd2"]
-              },
-              "node_partitions": [
-                "gpu",
-                "fast-disk"
-              ],
-              "target_tags": [
-                "hello"
-              ]
-            }
-          ]
-        }
-      }
-    ]
-}
-```
-
-##### GET Response JSON
-GET URL - http://localhost:8088/app/v1/services/hello-world
-
-Note, for an anti-affinity component no more than 1 container will be allocated
-on a specific node. In this example, 3 containers were requested by
-component "hello". All 3 containers were allocated because the cluster had 3 or
-more NMs. If the cluster had fewer than 3 NMs then fewer than 3 containers would
-be allocated. When the number of allocated containers is less than the
-number of requested containers, the component and the service will be in a
-non-STABLE state.
-
-```json
-{
-    "name": "hello-world",
-    "version": "1.0.0",
-    "description": "hello world example with anti-affinity",
-    "id": "application_1503963985568_0003",
-    "lifetime": -1,
-    "state": "STABLE",
-    "components": [
-        {
-            "name": "hello",
-            "state": "STABLE",
-            "resource": {
-                "cpus": 1,
-                "memory": "256"
-            },
-            "placement_policy": {
-              "constraints": [
-                {
-                  "type": "ANTI_AFFINITY",
-                  "scope": "NODE",
-                  "node_attributes": {
-                    "os": ["linux", "windows"],
-                    "fault_domain": ["fd1", "fd2"]
-                  },
-                  "node_partitions": [
-                    "gpu",
-                    "fast-disk"
-                  ],
-                  "target_tags": [
-                    "hello"
-                  ]
-                }
-              ]
-            },
-            "configuration": {
-                "properties": {},
-                "env": {},
-                "files": []
-            },
-            "quicklinks": [],
-            "containers": [
-                {
-                    "id": "container_e03_1503963985568_0003_01_000002",
-                    "ip": "10.22.8.143",
-                    "hostname": "ctr-e03-1503963985568-0003-01-000002.example.site",
-                    "state": "READY",
-                    "launch_time": 1504051512412,
-                    "bare_host": "host100.cloud.com",
-                    "component_instance_name": "hello-0"
-                },
-                {
-                    "id": "container_e03_1503963985568_0003_01_000003",
-                    "ip": "10.22.8.144",
-                    "hostname": "ctr-e03-1503963985568-0003-01-000003.example.site",
-                    "state": "READY",
-                    "launch_time": 1504051536450,
-                    "bare_host": "host101.cloud.com",
-                    "component_instance_name": "hello-1"
-                },
-                {
-                    "id": "container_e03_1503963985568_0003_01_000004",
-                    "ip": "10.22.8.145",
-                    "hostname": "ctr-e03-1503963985568-0003-01-000004.example.site",
-                    "state": "READY",
-                    "launch_time": 1504051536450,
-                    "bare_host": "host102.cloud.com",
-                    "component_instance_name": "hello-2"
-                }
-            ],
-            "launch_command": "./start_nginx.sh",
-            "number_of_containers": 1,
-            "run_privileged_container": false
-        }
-    ],
-    "configuration": {
-        "properties": {},
-        "env": {},
-        "files": []
-    },
-    "quicklinks": {}
-}
-```
-
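
To make the flex example above concrete, the sketch below issues the same PUT request from plain Java using only the JDK's HttpURLConnection. It is a minimal sketch assuming an unsecured cluster with the ResourceManager web app at localhost:8088, as in the examples; the class name FlexComponent is illustrative.

```java
import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class FlexComponent {
  public static void main(String[] args) throws IOException {
    // PUT URL and payload taken verbatim from the examples above.
    URL url = new URL(
        "http://localhost:8088/app/v1/services/hello-world/components/hello");
    String body = "{\"number_of_containers\": 3}";

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body.getBytes(StandardCharsets.UTF_8));
    }

    // A 200 response indicates a successful flex, matching the responses
    // listed in the swagger definition that follows.
    System.out.println("HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}
```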

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
deleted file mode 100644
index d90ae06..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ /dev/null
@@ -1,594 +0,0 @@
-# Hadoop YARN REST APIs for services v1 spec in YAML
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-swagger: '2.0'
-info:
-  title: "YARN Simplified API layer for services"
-  description: |
-    Bringing a new service onto YARN today is not a simple experience. The APIs of existing
-    frameworks are either too low level (native YARN), require writing new code (for frameworks with programmatic APIs),
-    or require writing a complex spec (for declarative frameworks).
-
-    This simplified REST API can be used to create and manage the lifecycle of YARN services.
-    In most cases, the application owner will not be forced to make any changes to their applications.
-    This is primarily true if the application is packaged with containerization technologies like Docker.
-
-    This document describes the API specifications (aka. YarnFile) for deploying/managing
-    containerized services on YARN. The same JSON spec can be used for both REST API
-    and CLI to manage the services.
-
-  version: "1.0.0"
-  license:
-    name: Apache 2.0
-    url: http://www.apache.org/licenses/LICENSE-2.0.html
-# the domain of the service
-host: localhost
-port: 8088 (default)
-# array of all schemes that your API supports
-schemes:
-  - http
-consumes:
-  - application/json
-produces:
-  - application/json
-paths:
-  /app/v1/services/version:
-    get:
-      summary: Get current version of the API server.
-      description: Get current version of the API server.
-      responses:
-        200:
-          description: Successful request
-
-  /app/v1/services:
-    get:
-      summary: (TBD) List of services running in the cluster.
-      description: Get a list of all currently running services (response includes a minimal projection of the service info). For more details do a GET on a specific service name.
-      responses:
-        200:
-          description: An array of services
-          schema:
-            type: array
-            items:
-              $ref: '#/definitions/Service'
-        default:
-          description: Unexpected error
-          schema:
-            $ref: '#/definitions/ServiceStatus'
-    post:
-      summary: Create a service
-      description: Create a service. The request JSON is a service object with details required for creation. If the request is successful, it returns 202 Accepted. A success of this API only confirms success in submission of the service creation request. There is no guarantee that the service will actually reach a RUNNING state. Resource availability and several other factors determine whether the service will be deployed in the cluster. It is expected that clients would subsequently call the GET API to get details of the service and determine its state.
-      parameters:
-        - name: Service
-          in: body
-          description: Service request object
-          required: true
-          schema:
-            $ref: '#/definitions/Service'
-      responses:
-        202:
-          description: The request to create a service is accepted
-        400:
-          description: Invalid service definition provided in the request body
-        500:
-          description: Failed to create a service
-        default:
-          description: Unexpected error
-          schema:
-            $ref: '#/definitions/ServiceStatus'
-
-  /app/v1/services/{service_name}:
-    put:
-      summary: Update a service or upgrade the binary version of the components of a running service
-      description: Update the runtime properties of a service. Currently the following operations are supported - updating the lifetime and stopping/starting a service.
-                   The PUT operation is also used to orchestrate an upgrade of the service containers to a newer version of their artifacts (TBD).
-      parameters:
-        - name: service_name
-          in: path
-          description: Service name
-          required: true
-          type: string
-        - name: Service
-          in: body
-          description: The updated service definition. It can contain the updated lifetime of a service or the desired state (STOPPED/STARTED) of a service to initiate a start/stop operation against the specified service
-          required: true
-          schema:
-            $ref: '#/definitions/Service'
-      responses:
-        204:
-          description: Update or upgrade was successful
-        404:
-          description: Service does not exist
-        default:
-          description: Unexpected error
-          schema:
-            $ref: '#/definitions/ServiceStatus'
-    delete:
-      summary: Destroy a service
-      description: Destroy a service and release all resources. This API might have to return JSON data providing the location of logs (TBD), etc.
-      parameters:
-        - name: service_name
-          in: path
-          description: Service name
-          required: true
-          type: string
-      responses:
-        204:
-          description: Destroy was successful
-        404:
-          description: Service does not exist
-        default:
-          description: Unexpected error
-          schema:
-            $ref: '#/definitions/ServiceStatus'
-    get:
-      summary: Get details of a service.
-      description: Return the details (including containers) of a running service
-      parameters:
-        - name: service_name
-          in: path
-          description: Service name
-          required: true
-          type: string
-      responses:
-        200:
-          description: a service object
-          schema:
-            type: object
-            items:
-              $ref: '#/definitions/Service'
-          examples:
-            service_name: logsearch
-            artifact:
-              id: logsearch:latest
-              type: docker
-        404:
-          description: Service does not exist
-        default:
-          description: Unexpected error
-          schema:
-            $ref: '#/definitions/ServiceStatus'
-  /app/v1/services/{service_name}/components/{component_name}:
-    put:
-      summary: Flex a component's number of instances.
-      description: Set a component's desired number of instances.
-      parameters:
-        - name: service_name
-          in: path
-          description: Service name
-          required: true
-          type: string
-        - name: component_name
-          in: path
-          description: Component name
-          required: true
-          type: string
-        - name: Component
-          in: body
-          description: The definition of a component which contains the updated number of instances.
-          required: true
-          schema:
-            $ref: '#/definitions/Component'
-      responses:
-        200:
-          description: Flex was successful
-        404:
-          description: Service does not exist
-        default:
-          description: Unexpected error
-          schema:
-            $ref: '#/definitions/ServiceStatus'
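For illustration, a flex request body for a hypothetical component named "regionserver" needs only the component name and the desired instance count:

    # illustrative request body only - component name and count are hypothetical
    {
      "name": "regionserver",
      "number_of_containers": 4
    }
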
-definitions:
-  Service:
-    description: a service resource has the following attributes.
-    required:
-      - name
-      - version
-    properties:
-      name:
-        type: string
-        description: A unique service name. If Registry DNS is enabled, the max length is 63 characters.
-      version:
-        type: string
-        description: Version of the service.
-      description:
-        type: string
-        description: Description of the service.
-      id:
-        type: string
-        description: A unique service id.
-      artifact:
-        description: The default artifact for all components of the service except the components which have Artifact type set to SERVICE (optional).
-        $ref: '#/definitions/Artifact'
-      resource:
-        description: The default resource for all components of the service (optional).
-        $ref: '#/definitions/Resource'
-      launch_time:
-        type: string
-        format: date
-        description: The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.
-      number_of_running_containers:
-        type: integer
-        format: int64
-        description: In the GET response this provides the total number of running containers for this service (across all components) at the time of the request. Note that a subsequent request can return a different number as more containers get allocated, until the total number of containers is reached or a flex request is made between the two requests.
-      lifetime:
-        type: integer
-        format: int64
-        description: Life time (in seconds) of the service from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.
-      components:
-        description: Components of a service.
-        type: array
-        items:
-          $ref: '#/definitions/Component'
-      configuration:
-        description: Config properties of a service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.
-        $ref: '#/definitions/Configuration'
-      state:
-        description: State of the service. Specifying a value for this attribute in the PUT payload means updating the service to this desired state.
-        $ref: '#/definitions/ServiceState'
-      quicklinks:
-        type: object
-        description: A blob of key-value pairs of quicklinks to be exported for a service.
-        additionalProperties:
-          type: string
-      queue:
-        type: string
-        description: The YARN queue that this service should be submitted to.
-      kerberos_principal:
-        description: The principal info of the user who launches the service.
-        $ref: '#/definitions/KerberosPrincipal'
-      docker_client_config:
-        type: string
-        description: URI of the file containing the docker client configuration (e.g. hdfs:///tmp/config.json).
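Putting the required fields and the most common optional ones together, a minimal illustrative Service payload (all values are hypothetical, reusing the logsearch example from the GET response above) might look like:

    # illustrative payload only - all values are hypothetical
    {
      "name": "logsearch",
      "version": "1.0",
      "components": [
        {
          "name": "logsearch",
          "number_of_containers": 2,
          "artifact": {
            "id": "logsearch:latest",
            "type": "DOCKER"
          },
          "resource": {
            "cpus": 1,
            "memory": "2048"
          }
        }
      ]
    }
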
-  ResourceInformation:
-    description:
-      ResourceInformation determines the unit/value of resource types in addition to memory and vcores. It will be part of the Resource object.
-    properties:
-      value:
-        type: integer
-        format: int64
-        description: Integer value of the resource.
-      unit:
-        type: string
-        description: Unit of the resource; acceptable values are p/n/u/m/k/M/G/T/P/Ki/Mi/Gi/Ti/Pi. By default it is empty, which means no unit.
-  Resource:
-    description:
-      Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpu & memory is expected; a validation exception is raised otherwise.
-    properties:
-      profile:
-        type: string
-        description: Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.
-      cpus:
-        type: integer
-        format: int32
-        description: Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).
-      memory:
-        type: string
-        description: Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.
-      additional:
-        type: object
-        additionalProperties:
-          $ref: '#/definitions/ResourceInformation'
-        description: A map of resource type name to resource type information. Including value (integer), and unit (string). This will be used to specify resource other than cpu and memory. Please refer to example below.
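As a sketch of the additional map mentioned above, a resource block adding a resource type beyond cpu and memory might look like the following (the resource type name yarn.io/gpu is an assumption, not taken from this spec):

    # illustrative fragment only - the resource type name is an assumption
    "resource": {
      "cpus": 2,
      "memory": "4096",
      "additional": {
        "yarn.io/gpu": {
          "value": 1,
          "unit": ""
        }
      }
    }
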
-  PlacementPolicy:
-    description: Advanced placement policy of the components of a service.
-    required:
-      - constraints
-    properties:
-      constraints:
-        description: Placement constraint details.
-        type: array
-        items:
-          $ref: '#/definitions/PlacementConstraint'
-  PlacementConstraint:
-    description: Placement constraint details.
-    required:
-      - type
-      - scope
-    properties:
-      name:
-        description: An optional name associated to this constraint.
-        type: string
-        example: C1
-      type:
-        description: The type of placement.
-        $ref: '#/definitions/PlacementType'
-      scope:
-        description: The scope of placement.
-        $ref: '#/definitions/PlacementScope'
-      target_tags:
-        description: The names of the components that this component's placement policy depends upon are added as target tags. For affinity, this component's containers request to be placed on hosts where containers of the target tag component(s) are running. Target tags can also contain the name of this component, in which case, for anti-affinity, no more than one container of this component can be placed on a host. Similarly, for cardinality, containers of this component request to be placed on hosts where at least minCardinality but no more than maxCardinality containers of the target tag component(s) are running.
-        type: array
-        items:
-          type: string
-      node_attributes:
-        description: Node attributes are a set of key:value(s) pairs associated with nodes.
-        type: object
-        additionalProperties:
-          type: array
-          items:
-            type: string
-      node_partitions:
-        description: Node partitions where the containers of this component can run.
-        type: array
-        items:
-          type: string
-      min_cardinality:
-        type: integer
-        format: int64
-        description: When the placement type is cardinality, the minimum number of containers of the depending component that a host should have for containers of this component to be allocated on it.
-        example: 2
-      max_cardinality:
-        type: integer
-        format: int64
-        description: When the placement type is cardinality, the maximum number of containers of the depending component that a host should have for containers of this component to be allocated on it.
-        example: 3
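An illustrative placement_policy fragment, sketching a NODE-scoped anti-affinity constraint whose target tag is the (hypothetical) component's own name, so that no two of its containers land on the same host:

    # illustrative fragment only - component name is hypothetical
    "placement_policy": {
      "constraints": [
        {
          "name": "C1",
          "type": "ANTI_AFFINITY",
          "scope": "NODE",
          "target_tags": ["regionserver"]
        }
      ]
    }
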
-  PlacementType:
-    description: The type of placement - affinity/anti-affinity/affinity-with-cardinality with containers of another component or containers of the same component (self).
-    properties:
-      type:
-        type: string
-        enum:
-          - AFFINITY
-          - ANTI_AFFINITY
-          - AFFINITY_WITH_CARDINALITY
-  PlacementScope:
-    description: The scope of placement for the containers of a component.
-    properties:
-      type:
-        type: string
-        enum:
-          - NODE
-          - RACK
-  Artifact:
-    description: Artifact of a service component. If not specified, the component will just run the bare launch command and no artifact will be localized.
-    required:
-    - id
-    properties:
-      id:
-        type: string
-        description: Artifact id. Examples are package location uri for tarball based services, image name for docker, name of service, etc.
-      type:
-        type: string
-        description: Artifact type, like docker, tarball, etc. (optional). For TARBALL type, the specified tarball will be localized to the container local working directory under a folder named lib. For SERVICE type, the service specified will be read and its components will be added into this service. The original component with artifact type SERVICE will be removed (any properties specified in the original component will be ignored).
-        enum:
-          - DOCKER
-          - TARBALL
-          - SERVICE
-        default: DOCKER
-      uri:
-        type: string
-        description: Artifact location to support multiple artifact stores (optional).
-  Component:
-    description: One or more components of the service. If the service is HBase, say, then a component can be a simple role like master or regionserver. If the service is a complex business webapp, then a component can be another service, say Kafka or Storm, thereby opening up support for complex and nested services.
-    required:
-    - name
-    properties:
-      name:
-        type: string
-        description: Name of the service component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters.
-      state:
-        description: The state of the component
-        $ref: "#/definitions/ComponentState"
-      dependencies:
-        type: array
-        items:
-          type: string
-        description: An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG.
-      readiness_check:
-        description: Readiness check for this component.
-        $ref: '#/definitions/ReadinessCheck'
-      artifact:
-        description: Artifact of the component (optional). If not specified, the service level global artifact takes effect.
-        $ref: '#/definitions/Artifact'
-      launch_command:
-        type: string
-        description: The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any).
-      resource:
-        description: Resource of this component (optional). If not specified, the service level global resource takes effect.
-        $ref: '#/definitions/Resource'
-      number_of_containers:
-        type: integer
-        format: int64
-        description: Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.
-      containers:
-        type: array
-        description: Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started service.
-        items:
-          $ref: '#/definitions/Container'
-      run_privileged_container:
-        type: boolean
-        description: Run all containers of this component in privileged mode (YARN-4262).
-      placement_policy:
-        description: Advanced scheduling and placement policies for all containers of this component.
-        $ref: '#/definitions/PlacementPolicy'
-      configuration:
-        description: Config properties for this component.
-        $ref: '#/definitions/Configuration'
-      quicklinks:
-        type: array
-        items:
-          type: string
-        description: A list of quicklink keys defined at the service level, and to be resolved by this component.
-      restartPolicy:
-        type: string
-        description: Restart policy of the component. ALWAYS restarts the component even if an instance exits with code 0; ON_FAILURE restarts the component only if an instance exits with a non-zero code; NEVER does not restart the component in any case.
-        enum:
-          - ALWAYS
-          - ON_FAILURE
-          - NEVER
-        default: ALWAYS
-  ReadinessCheck:
-    description: A check to be performed to determine the readiness of a component instance (a container). If no readiness check is specified, the default readiness check will be used unless the yarn.service.default-readiness-check.enabled configuration property is set to false at the component, service, or system level. The artifact field is currently unsupported but may be implemented in the future, enabling a pluggable helper container to support advanced use cases.
-    required:
-    - type
-    properties:
-      type:
-        type: string
-        description: DEFAULT (AM checks whether the container has an IP and optionally performs a DNS lookup for the container hostname), HTTP (AM performs default checks, plus sends a REST call to the container and expects a response code between 200 and 299), or PORT (AM performs default checks, plus attempts to open a socket connection to the container on a specified port).
-        enum:
-          - DEFAULT
-          - HTTP
-          - PORT
-      properties:
-        type: object
-        description: A blob of key value pairs that will be used to configure the check.
-        additionalProperties:
-          type: string
-      artifact:
-        description: Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform the actual container readiness check. At the end it is expected to respond with a 204 No Content, just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note that only artifacts of type docker are supported for now. NOT IMPLEMENTED YET
-        $ref: '#/definitions/Artifact'
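A sketch of an HTTP readiness check; the "url" property key and its value are assumptions for illustration and are not defined by this spec:

    # illustrative fragment only - the property key/value are assumptions
    "readiness_check": {
      "type": "HTTP",
      "properties": {
        "url": "http://localhost:8080/health"
      }
    }
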
-  Configuration:
-    description: Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.
-    properties:
-      properties:
-        type: object
-        description: A blob of key-value pairs for configuring the YARN service AM.
-        additionalProperties:
-          type: string
-      env:
-        type: object
-        description: A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.
-        additionalProperties:
-          type: string
-      files:
-        description: Array of files that need to be created and made available as volumes in the service component containers.
-        type: array
-        items:
-          $ref: '#/definitions/ConfigFile'
-  ConfigFile:
-    description: A config file that needs to be created and made available as a volume in a service component container.
-    properties:
-      type:
-        type: string
-        description: Format of the config file, one of the standard formats like xml, properties, json, yaml, or template.
-        enum:
-          - XML
-          - PROPERTIES
-          - JSON
-          - YAML
-          - TEMPLATE
-          - HADOOP_XML
-          - STATIC
-          - ARCHIVE
-      dest_file:
-        type: string
-        description: The path at which this configuration file should be created. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.
-      src_file:
-        type: string
-        description: This provides the source location of the configuration file, the content of which is dumped into dest_file post property substitutions, in the format specified in type. Typically src_file points to a source-controlled, network-accessible file maintained by tools like puppet or chef, or stored on hdfs. Currently, only hdfs is supported.
-      properties:
-        type: object
-        description: A blob of key value pairs that will be dumped into dest_file in the format specified in type. If src_file is specified, the src_file content is dumped into dest_file, and these properties overwrite any existing properties in src_file or are added as new properties.
-        additionalProperties:
-          type: string
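An illustrative configuration fragment with a single config file entry; the paths and the property value here are hypothetical:

    # illustrative fragment only - paths and property value are hypothetical
    "configuration": {
      "files": [
        {
          "type": "HADOOP_XML",
          "dest_file": "core-site.xml",
          "src_file": "hdfs:///config/core-site.xml",
          "properties": {
            "fs.defaultFS": "hdfs://namenode:9000"
          }
        }
      ]
    }
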
-  Container:
-    description: An instance of a running service container.
-    properties:
-      id:
-        type: string
-        description: Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.
-      launch_time:
-        type: string
-        format: date
-        description: The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.
-      ip:
-        type: string
-        description: IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
-      hostname:
-        type: string
-        description: Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
-      bare_host:
-        type: string
-        description: The bare node or host in which the container is running, e.g. cn008.example.com.
-      state:
-        description: State of the container of a service.
-        $ref: '#/definitions/ContainerState'
-      component_instance_name:
-        type: string
-        description: Name of the component instance that this container instance belongs to. A component instance is named $COMPONENT_NAME-i, where i is a
-                     monotonically increasing integer. E.g. a component called nginx can have multiple component instances named nginx-0, nginx-1 etc.
-                     Each component instance is backed by a container instance.
-      resource:
-        description: Resource used for this container.
-        $ref: '#/definitions/Resource'
-      artifact:
-        description: Artifact used for this container.
-        $ref: '#/definitions/Artifact'
-      privileged_container:
-        type: boolean
-        description: Container running in privileged mode or not.
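Pulling together the field examples given in the descriptions above, a container entry in a GET response could look like:

    # values assembled from the field descriptions above
    {
      "id": "container_e3751_1458061340047_0008_01_000002",
      "launch_time": "2016-03-16T01:01:49.000Z",
      "ip": "172.31.42.141",
      "hostname": "ctr-e3751-1458061340047-0008-01-000002.examplestg.site",
      "bare_host": "cn008.example.com",
      "state": "READY",
      "component_instance_name": "nginx-0"
    }
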
-  ServiceState:
-    description: The current state of a service.
-    properties:
-      state:
-        type: string
-        description: enum of the state of the service
-        enum:
-          - ACCEPTED
-          - STARTED
-          - STABLE
-          - STOPPED
-          - FAILED
-          - FLEX
-          - UPGRADING
-  ContainerState:
-    description: The current state of the container of a service.
-    properties:
-      state:
-        type: string
-        description: enum of the state of the container
-        enum:
-          - INIT
-          - STARTED
-          - READY
-  ComponentState:
-    description: The state of the component
-    properties:
-      state:
-        type: string
-        description: enum of the state of the component
-        enum:
-          - INIT
-          - FLEXING
-          - STABLE
-          - UPGRADING
-  ServiceStatus:
-    description: The current status of a submitted service, returned as a response to the GET API.
-    properties:
-      diagnostics:
-        type: string
-        description: Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value if the service is in a non-running state.
-      state:
-        description: Service state.
-        $ref: '#/definitions/ServiceState'
-      code:
-        type: integer
-        format: int32
-        description: An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.
-  KerberosPrincipal:
-    description: The kerberos principal info of the user who launches the service.
-    properties:
-      principal_name:
-        type: string
-        description: The principal name of the user who launches the service. Note that `_HOST` is required in the `principal_name` field such as `testuser/_HOST@EXAMPLE.COM` because Hadoop client validates that the server's (in this case, the AM's) principal has hostname present when communicating to the server.
-      keytab:
-        type: string
-        description: The URI of the kerberos keytab. Currently supports only files present on the bare host. The URI starts with "file://" - a path on the local host where the keytab is stored. It is assumed that the admin pre-installs the keytabs on the local host before the AM launches.
-
-
-
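A sketch of a kerberos_principal fragment; the principal name reuses the example above, while the keytab path is hypothetical:

    # illustrative fragment only - keytab path is hypothetical
    "kerberos_principal": {
      "principal_name": "testuser/_HOST@EXAMPLE.COM",
      "keytab": "file:///etc/security/keytabs/testuser.keytab"
    }
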

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties
deleted file mode 100644
index 8c679b9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties
+++ /dev/null
@@ -1,76 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-# This is the log4j configuration for YARN Services REST API Server
-
-# Log rotation based on size (1GB, see MaxFileSize below) with a max of 10 backup files
-log4j.rootLogger=INFO, restservicelog
-log4j.threshold=ALL
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
-
-log4j.appender.restservicelog=org.apache.log4j.RollingFileAppender
-log4j.appender.restservicelog.layout=org.apache.log4j.PatternLayout
-log4j.appender.restservicelog.File=${REST_SERVICE_LOG_DIR}/restservice.log
-log4j.appender.restservicelog.MaxFileSize=1GB
-log4j.appender.restservicelog.MaxBackupIndex=10
-
-# log layout skips stack-trace creation operations by avoiding line numbers and method names
-log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
-
-# the debug layout below is much more expensive
-#log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
-
-# configure stderr
-# set the conversion pattern of stderr
-# Print the date in ISO 8601 format
-log4j.appender.stderr=org.apache.log4j.ConsoleAppender
-log4j.appender.stderr.Target=System.err
-log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
-log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
-
-log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
-log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
-log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
-
-# for debugging REST API Service
-#log4j.logger.org.apache.hadoop.yarn.services=DEBUG
-
-# uncomment to debug service lifecycle issues
-#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
-#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
-
-# uncomment for YARN operations
-#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
-
-# uncomment this to debug security problems
-#log4j.logger.org.apache.hadoop.security=DEBUG
-
-#crank back on some noise
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.hadoop.hdfs=WARN
-log4j.logger.org.apache.hadoop.hdfs.shortcircuit=ERROR
-
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
-log4j.logger.org.apache.zookeeper=WARN
-log4j.logger.org.apache.curator.framework.state=ERROR
-log4j.logger.org.apache.curator.framework.imps=WARN
-
-log4j.logger.org.mortbay.log=DEBUG

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app
deleted file mode 100644
index 6a077b1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DON'T DELETE. REST WEBAPP RUN SCRIPT WILL STOP WORKING.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
deleted file mode 100644
index 1282c9f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<web-app xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd"
-        version="3.0">
-
-    <servlet>
-        <servlet-name>Jersey REST API</servlet-name>
-        <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
-        <init-param>
-            <param-name>com.sun.jersey.config.property.packages</param-name>
-            <param-value>org.apache.hadoop.yarn.service.webapp,org.apache.hadoop.yarn.service.api,org.apache.hadoop.yarn.service.api.records</param-value>
-        </init-param>
-        <init-param>
-          <param-name>com.sun.jersey.api.json.POJOMappingFeature</param-name>
-          <param-value>true</param-value>
-        </init-param>
-        <load-on-startup>1</load-on-startup>
-    </servlet>
-    <servlet-mapping>
-        <servlet-name>Jersey REST API</servlet-name>
-        <url-pattern>/*</url-pattern>
-    </servlet-mapping>
-</web-app>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a23ff8d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
deleted file mode 100644
index 75b9486..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.service.api.records.Artifact;
-import org.apache.hadoop.yarn.service.api.records.Component;
-import org.apache.hadoop.yarn.service.api.records.Container;
-import org.apache.hadoop.yarn.service.api.records.ContainerState;
-import org.apache.hadoop.yarn.service.api.records.Resource;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.client.ServiceClient;
-import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
-import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-/**
- * A mock version of ServiceClient - This class is designed
- * to simulate various error conditions that will happen
- * when a consumer class calls ServiceClient.
- */
-public class ServiceClientTest extends ServiceClient {
-
-  private Configuration conf = new Configuration();
-  private Service goodServiceStatus = buildLiveGoodService();
-  private boolean initialized;
-  private Set<String> expectedInstances = new HashSet<>();
-
-  public ServiceClientTest() {
-    super();
-  }
-
-  @Override
-  public void init(Configuration conf) {
-    if (!initialized) {
-      super.init(conf);
-      initialized = true;
-    }
-  }
-
-  @Override
-  public void stop() {
-    // This is needed for testing the API Server, which uses the client to get status
-    // and then perform an action.
-  }
-
-  public void forceStop() {
-    expectedInstances.clear();
-    super.stop();
-  }
-
-  @Override
-  public Configuration getConfig() {
-    return conf;
-  }
-
-  @Override
-  public ApplicationId actionCreate(Service service) throws IOException {
-    ServiceApiUtil.validateAndResolveService(service,
-        new SliderFileSystem(conf), getConfig());
-    return ApplicationId.newInstance(System.currentTimeMillis(), 1);
-  }
-
-  @Override
-  public Service getStatus(String appName) throws FileNotFoundException {
-    if ("jenkins".equals(appName)) {
-      return goodServiceStatus;
-    } else {
-      throw new FileNotFoundException("Service " + appName + " not found");
-    }
-  }
-
-  @Override
-  public int actionStart(String serviceName)
-      throws YarnException, IOException {
-    if (serviceName != null && serviceName.equals("jenkins")) {
-      return EXIT_SUCCESS;
-    } else {
-      throw new ApplicationNotFoundException("");
-    }
-  }
-
-  @Override
-  public int actionStop(String serviceName, boolean waitForAppStopped)
-      throws YarnException, IOException {
-    if (serviceName == null) {
-      throw new NullPointerException();
-    }
-    if (serviceName.equals("jenkins")) {
-      return EXIT_SUCCESS;
-    } else if (serviceName.equals("jenkins-second-stop")) {
-      return EXIT_COMMAND_ARGUMENT_ERROR;
-    } else {
-      throw new ApplicationNotFoundException("");
-    }
-  }
-
-  @Override
-  public int actionDestroy(String serviceName) {
-    if (serviceName != null) {
-      if (serviceName.equals("jenkins")) {
-        return EXIT_SUCCESS;
-      } else if (serviceName.equals("jenkins-already-stopped")) {
-        return EXIT_SUCCESS;
-      } else if (serviceName.equals("jenkins-doesn't-exist")) {
-        return EXIT_NOT_FOUND;
-      } else if (serviceName.equals("jenkins-error-cleaning-registry")) {
-        return EXIT_OTHER_FAILURE;
-      }
-    }
-    throw new IllegalArgumentException();
-  }
-
-  @Override
-  public int initiateUpgrade(Service service) throws YarnException,
-      IOException {
-    if (service.getName() != null && service.getName().equals("jenkins")) {
-      return EXIT_SUCCESS;
-    } else {
-      throw new IllegalArgumentException();
-    }
-  }
-
-  @Override
-  public int actionUpgrade(Service service, List<Container> compInstances)
-      throws IOException, YarnException {
-    if (service.getName() != null && service.getName().equals("jenkins")
-        && compInstances != null) {
-      Set<String> actualInstances = compInstances.stream().map(
-          Container::getComponentInstanceName).collect(Collectors.toSet());
-      if (actualInstances.equals(expectedInstances)) {
-        return EXIT_SUCCESS;
-      }
-    }
-    throw new IllegalArgumentException();
-  }
-
-  Service getGoodServiceStatus() {
-    return goodServiceStatus;
-  }
-
-  void setExpectedInstances(Set<String> instances) {
-    if (instances != null) {
-      expectedInstances.addAll(instances);
-    }
-  }
-
-  static Service buildGoodService() {
-    Service service = new Service();
-    service.setName("jenkins");
-    service.setVersion("v1");
-    Artifact artifact = new Artifact();
-    artifact.setType(Artifact.TypeEnum.DOCKER);
-    artifact.setId("jenkins:latest");
-    Resource resource = new Resource();
-    resource.setCpus(1);
-    resource.setMemory("2048");
-    List<Component> components = new ArrayList<>();
-    for (int i = 0; i < 2; i++) {
-      Component c = new Component();
-      c.setName("jenkins" + i);
-      c.setNumberOfContainers(2L);
-      c.setArtifact(artifact);
-      c.setLaunchCommand("");
-      c.setResource(resource);
-      components.add(c);
-    }
-    service.setComponents(components);
-    return service;
-  }
-
-  static Service buildLiveGoodService() {
-    Service service = buildGoodService();
-    Component comp = service.getComponents().iterator().next();
-    List<Container> containers = new ArrayList<>();
-    for (int i = 0; i < comp.getNumberOfContainers(); i++) {
-      Container container = new Container();
-      container.setComponentInstanceName(comp.getName() + "-" + (i + 1));
-      container.setState(ContainerState.READY);
-      containers.add(container);
-    }
-    comp.setContainers(containers);
-    return service;
-  }
-}




[09/50] [abbrv] hadoop git commit: HDDS-73. Add acceptance tests for Ozone Shell. Contributed by Lokesh Jain.

Posted by ar...@apache.org.
HDDS-73. Add acceptance tests for Ozone Shell.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0367d3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0367d3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0367d3b

Branch: refs/heads/HDDS-48
Commit: e0367d3b248d47fc95e5df5d772f93663319c3b8
Parents: 6547645
Author: Anu Engineer <ae...@apache.org>
Authored: Thu May 17 14:58:09 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu May 17 15:21:26 2018 -0700

----------------------------------------------------------------------
 .../src/test/compose/docker-config              |   1 +
 .../robotframework/acceptance/ozone-shell.robot | 235 +++++++++++++++++++
 .../test/robotframework/acceptance/ozone.robot  |  32 ---
 3 files changed, 236 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0367d3b/hadoop-ozone/acceptance-test/src/test/compose/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/docker-config
index c693db0..0591a7a 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/compose/docker-config
@@ -16,6 +16,7 @@
 
 CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
 OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0367d3b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot
new file mode 100644
index 0000000..0f01b8d
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-shell.robot
@@ -0,0 +1,235 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoke test to start cluster with docker-compose environments.
+Library             OperatingSystem
+Suite Setup         Startup Ozone Cluster
+Suite Teardown      Teardown Ozone Cluster
+
+*** Variables ***
+${basedir}
+*** Test Cases ***
+
+Daemons are running without error
+    Is daemon running without error           ksm
+    Is daemon running without error           scm
+    Is daemon running without error           namenode
+    Is daemon running without error           datanode
+
+Check if datanode is connected to the scm
+    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes   1
+
+Scale it up to 5 datanodes
+    Scale datanodes up  5
+    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes   5
+
+Test ozone shell (RestClient without http port)
+                    Execute on          datanode        ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on          datanode        ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+                    Should Be Equal     ${result}       10
+                    Execute on          datanode        ozone oz -createBucket http://ksm/hive/bb1
+    ${result} =     Execute on          datanode        ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+                    Should Be Equal     ${result}       DISK
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+                    Should Be Equal     ${result}       GROUP
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+                    Should Be Equal     ${result}       USER
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+                    Should Be Equal     ${result}       hive
+                    Execute on          datanode        ozone oz -putKey http://ksm/hive/bb1/key1 -file NOTICE.txt
+                    Execute on          datanode        rm -f NOTICE.txt.1
+                    Execute on          datanode        ozone oz -getKey http://ksm/hive/bb1/key1 -file NOTICE.txt.1
+                    Execute on          datanode        ls -l NOTICE.txt.1
+    ${result} =     Execute on          datanode        ozone oz -infoKey http://ksm/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+                    Should contain      ${result}       createdOn
+    ${result} =     Execute on          datanode        ozone oz -listKey o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+                    Should Be Equal     ${result}       key1
+                    Execute on          datanode        ozone oz -deleteKey http://ksm/hive/bb1/key1 -v
+                    Execute on          datanode        ozone oz -deleteBucket http://ksm/hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume http://ksm/hive -user bilbo
+
+Test ozone shell (RestClient with http port)
+                    Execute on          datanode        ozone oz -createVolume http://ksm:9874/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm:9862 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on          datanode        ozone oz -updateVolume http://ksm:9874/hive -user bill -quota 10TB
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+                    Should Be Equal     ${result}       10
+                    Execute on          datanode        ozone oz -createBucket http://ksm:9874/hive/bb1
+    ${result} =     Execute on          datanode        ozone oz -infoBucket http://ksm:9874/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+                    Should Be Equal     ${result}       DISK
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm:9874/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+                    Should Be Equal     ${result}       GROUP
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm:9874/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+                    Should Be Equal     ${result}       USER
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm:9862/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+                    Should Be Equal     ${result}       hive
+                    Execute on          datanode        ozone oz -putKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt
+                    Execute on          datanode        rm -f NOTICE.txt.1
+                    Execute on          datanode        ozone oz -getKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt.1
+                    Execute on          datanode        ls -l NOTICE.txt.1
+    ${result} =     Execute on          datanode        ozone oz -infoKey http://ksm:9874/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+                    Should contain      ${result}       createdOn
+    ${result} =     Execute on          datanode        ozone oz -listKey o3://ksm:9862/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+                    Should Be Equal     ${result}       key1
+                    Execute on          datanode        ozone oz -deleteKey http://ksm:9874/hive/bb1/key1 -v
+                    Execute on          datanode        ozone oz -deleteBucket http://ksm:9874/hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume http://ksm:9874/hive -user bilbo
+
+Test ozone shell (RestClient without hostname)
+                    Execute on          datanode        ozone oz -createVolume http:///hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on          datanode        ozone oz -updateVolume http:///hive -user bill -quota 10TB
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+    ${result} =     Execute on          datanode        ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+                    Should Be Equal     ${result}       10
+                    Execute on          datanode        ozone oz -createBucket http:///hive/bb1
+    ${result} =     Execute on          datanode        ozone oz -infoBucket http:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+                    Should Be Equal     ${result}       DISK
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+                    Should Be Equal     ${result}       GROUP
+    ${result} =     Execute on          datanode        ozone oz -updateBucket http:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+                    Should Be Equal     ${result}       USER
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+                    Should Be Equal     ${result}       hive
+                    Execute on          datanode        ozone oz -putKey http:///hive/bb1/key1 -file NOTICE.txt
+                    Execute on          datanode        rm -f NOTICE.txt.1
+                    Execute on          datanode        ozone oz -getKey http:///hive/bb1/key1 -file NOTICE.txt.1
+                    Execute on          datanode        ls -l NOTICE.txt.1
+    ${result} =     Execute on          datanode        ozone oz -infoKey http:///hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+                    Should contain      ${result}       createdOn
+    ${result} =     Execute on          datanode        ozone oz -listKey o3:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+                    Should Be Equal     ${result}       key1
+                    Execute on          datanode        ozone oz -deleteKey http:///hive/bb1/key1 -v
+                    Execute on          datanode        ozone oz -deleteBucket http:///hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume http:///hive -user bilbo
+
+Test ozone shell (RpcClient without http port)
+                    Execute on          datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on          datanode        ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
+    ${result} =     Execute on          datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+    ${result} =     Execute on          datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+                    Should Be Equal     ${result}       10
+                    Execute on          datanode        ozone oz -createBucket o3://ksm/hive/bb1
+    ${result} =     Execute on          datanode        ozone oz -infoBucket o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+                    Should Be Equal     ${result}       DISK
+    ${result} =     Execute on          datanode        ozone oz -updateBucket o3://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+                    Should Be Equal     ${result}       GROUP
+    ${result} =     Execute on          datanode        ozone oz -updateBucket o3://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+                    Should Be Equal     ${result}       USER
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+                    Should Be Equal     ${result}       hive
+                    Execute on          datanode        ozone oz -deleteBucket o3://ksm/hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume o3://ksm/hive -user bilbo
+
+Test ozone shell (RpcClient with http port)
+                    Execute on          datanode        ozone oz -createVolume o3://ksm:9862/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm:9862 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on          datanode        ozone oz -updateVolume o3://ksm:9862/hive -user bill -quota 10TB
+    ${result} =     Execute on          datanode        ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+    ${result} =     Execute on          datanode        ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+                    Should Be Equal     ${result}       10
+                    Execute on          datanode        ozone oz -createBucket o3://ksm:9862/hive/bb1
+    ${result} =     Execute on          datanode        ozone oz -infoBucket o3://ksm:9862/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+                    Should Be Equal     ${result}       DISK
+    ${result} =     Execute on          datanode        ozone oz -updateBucket o3://ksm:9862/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+                    Should Be Equal     ${result}       GROUP
+    ${result} =     Execute on          datanode        ozone oz -updateBucket o3://ksm:9862/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+                    Should Be Equal     ${result}       USER
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm:9862/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+                    Should Be Equal     ${result}       hive
+                    Execute on          datanode        ozone oz -deleteBucket o3://ksm:9862/hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume o3://ksm:9862/hive -user bilbo
+
+Test ozone shell (RpcClient without hostname)
+                    Execute on          datanode        ozone oz -createVolume o3:///hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on          datanode        ozone oz -updateVolume o3:///hive -user bill -quota 10TB
+    ${result} =     Execute on          datanode        ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+    ${result} =     Execute on          datanode        ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
+                    Should Be Equal     ${result}       10
+                    Execute on          datanode        ozone oz -createBucket o3:///hive/bb1
+    ${result} =     Execute on          datanode        ozone oz -infoBucket o3:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+                    Should Be Equal     ${result}       DISK
+    ${result} =     Execute on          datanode        ozone oz -updateBucket o3:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+                    Should Be Equal     ${result}       GROUP
+    ${result} =     Execute on          datanode        ozone oz -updateBucket o3:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+                    Should Be Equal     ${result}       USER
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+                    Should Be Equal     ${result}       hive
+                    Execute on          datanode        ozone oz -deleteBucket o3:///hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume o3:///hive -user bilbo
+
+*** Keywords ***
+
+Startup Ozone Cluster
+    ${rc}       ${output} =                 Run docker compose          down
+    ${rc}       ${output} =                 Run docker compose          up -d
+    Should Be Equal As Integers             ${rc}                       0
+    Wait Until Keyword Succeeds             1min    5sec    Is Daemon started   ksm     HTTP server of KSM is listening
+
+Teardown Ozone Cluster
+    Run docker compose      down
+
+Is daemon running without error
+    [arguments]             ${name}
+    ${result} =             Run                     docker ps
+    Should contain          ${result}               _${name}_1
+    ${rc}                   ${result} =             Run docker compose      logs ${name}
+    Should not contain      ${result}               ERROR
+
+Is Daemon started
+    [arguments]     ${name}             ${expression}
+    ${rc}           ${result} =         Run docker compose      logs
+    Should contain  ${result}           ${expression}
+
+Have healthy datanodes
+    [arguments]         ${requirednodes}
+    ${result} =         Execute on          scm                 curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
+    Should Be Equal     ${result}           ${requirednodes}
+
+Scale datanodes up
+    [arguments]              ${requirednodes}
+    Run docker compose       scale datanode=${requirednodes}
+
+Execute on
+    [arguments]     ${componentname}    ${command}
+    ${rc}           ${return} =         Run docker compose          exec ${componentname} ${command}
+    [return]        ${return}
+
+Run docker compose
+    [arguments]                     ${command}
+                                    Set Environment Variable    OZONEDIR                               ${basedir}/hadoop-dist/target/ozone
+    ${rc}                           ${output} =                 Run And Return Rc And Output           docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command}
+    Log                             ${output}
+    Should Be Equal As Integers     ${rc}                       0
+    [return]                            ${rc}                       ${output}

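For reference, the keywords above reduce every test step to a single
docker-compose invocation. A step such as

    Execute on    datanode    ozone oz -listVolume o3://ksm -user bilbo

runs, via Run docker compose, roughly this host command:

    docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml exec datanode ozone oz -listVolume o3://ksm -user bilbo

Run docker compose asserts a zero return code and hands the command output
back, which is what the grep/jq assertions in the test cases consume.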
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0367d3b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
index c0e04a8..c52db56 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
@@ -47,38 +47,6 @@ Test rest interface
     ${result} =     Execute on          datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
                     Should contain      ${result}       200 OK
 
-Test ozone cli
-                    Execute on          datanode        ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root
-    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
-                    Should contain      ${result}       createdOn
-                    Execute on          datanode        ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB
-    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
-                    Should Be Equal     ${result}       bill
-    ${result} =     Execute on          datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
-                    Should Be Equal     ${result}       10
-                    Execute on          datanode        ozone oz -createBucket http://ksm/hive/bb1
-    ${result} =     Execute on          datanode        ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
-                    Should Be Equal     ${result}       DISK
-    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
-                    Should Be Equal     ${result}       GROUP
-    ${result} =     Execute on          datanode        ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
-                    Should Be Equal     ${result}       USER
-    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
-                    Should Be Equal     ${result}       hive
-                    Execute on          datanode        ozone oz -putKey http://ksm/hive/bb1/key1 -file NOTICE.txt
-                    Execute on          datanode        rm -f NOTICE.txt.1
-                    Execute on          datanode        ozone oz -getKey http://ksm/hive/bb1/key1 -file NOTICE.txt.1
-                    Execute on          datanode        ls -l NOTICE.txt.1
-    ${result} =     Execute on          datanode        ozone oz -infoKey http://ksm/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
-                    Should contain      ${result}       createdOn
-    ${result} =     Execute on          datanode        ozone oz -listKey o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
-                    Should Be Equal     ${result}       key1
-                    Execute on          datanode        ozone oz -deleteKey http://ksm/hive/bb1/key1 -v
-                    Execute on          datanode        ozone oz -deleteBucket http://ksm/hive/bb1
-                    Execute on          datanode        ozone oz -deleteVolume http://ksm/hive -user bilbo
-
-
-
 Check webui static resources
     ${result} =     Execute on          scm             curl -s -I http://localhost:9876/static/bootstrap-3.0.2/js/bootstrap.min.js
                     Should contain      ${result}       200


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/50] [abbrv] hadoop git commit: YARN-8296. Removed unique_component_support from YARN services. Contributed by Suma Shivaprasad

Posted by ar...@apache.org.
YARN-8296.  Removed unique_component_support from YARN services.
            Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f2dd22a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f2dd22a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f2dd22a

Branch: refs/heads/HDDS-48
Commit: 2f2dd22aad4a003228b9efe3b4c506a6922d09d8
Parents: 989cfdc
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 17 20:58:13 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 17 20:58:13 2018 -0400

----------------------------------------------------------------------
 .../src/main/resources/definition/YARN-Services-Examples.md    | 1 -
 .../src/site/markdown/yarn-service/YarnServiceAPI.md           | 3 +--
 .../src/main/webapp/app/models/yarn-servicedef.js              | 4 ----
 .../app/templates/components/service-component-table.hbs       | 6 ------
 4 files changed, 1 insertion(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f2dd22a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
index 83e558c..b7ad6c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md
@@ -221,7 +221,6 @@ POST URL - http://localhost:8088:/app/v1/services/hbase-app-1
     {
       "name": "regionserver",
       "number_of_containers": 3,
-      "unique_component_support": "true",
       "artifact": {
         "id": "hbase:latest",
         "type": "DOCKER"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f2dd22a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index 2b567b0..f1dc81b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -220,7 +220,7 @@ One or more components of the service. If the service is HBase say, then the com
 
 |Name|Description|Required|Schema|Default|
 |----|----|----|----|----|
-|name|Name of the service component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters.|true|string||
+|name|Name of the service component (mandatory). If Registry DNS is enabled, the max length is 44 characters.|true|string||
 |state|The state of the component|false|ComponentState||
 |dependencies|An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG.|false|string array||
 |readiness_check|Readiness check for this component.|false|ReadinessCheck||
@@ -638,7 +638,6 @@ POST URL - http://localhost:8088:/app/v1/services/hbase-app-1
     {
       "name": "regionserver",
       "number_of_containers": 3,
-      "unique_component_support": "true",
       "artifact": {
         "id": "hbase:latest",
         "type": "DOCKER"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f2dd22a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
index 19c74e1..2a9953d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
@@ -65,7 +65,6 @@ export default DS.Model.extend({
       artifactType: 'DOCKER',
       launchCommand: '',
       dependencies: [],
-      uniqueComponentSupport: false,
       configuration: null
     });
   },
@@ -199,9 +198,6 @@ export default DS.Model.extend({
       cpus: record.get('cpus'),
       memory: record.get('memory')
     };
-    if (record.get('uniqueComponentSupport')) {
-      json['unique_component_support'] = "true";
-    }
     if (record.get('configuration')) {
       json['configuration'] = record.get('configuration');
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f2dd22a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
index 9d519ae..05a7451 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
@@ -97,12 +97,6 @@
           <label class="required">Launch Command</label>
           {{input type="text" class="form-control" value=currentComponent.launchCommand}}
         </div>
-        <div class="form-group">
-          <label class="checkbox-inline">
-            {{input type="checkbox" checked=currentComponent.uniqueComponentSupport}}
-            Unique Component Support
-          </label>
-        </div>
       </div>
       <div class="modal-footer">
         <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[07/50] [abbrv] hadoop git commit: YARN-8141. Removed YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS flag. Contributed by Chandni Singh

Posted by ar...@apache.org.
YARN-8141.  Removed YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS flag.
            Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d45a0b7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d45a0b7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d45a0b7d

Branch: refs/heads/HDDS-48
Commit: d45a0b7d73519acb78cd94ac3186bd8481f6c13e
Parents: 7f083ed
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 17 17:29:34 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 17 17:29:34 2018 -0400

----------------------------------------------------------------------
 .../containerlaunch/AbstractLauncher.java       | 26 ++++++++++-----
 .../runtime/DockerLinuxContainerRuntime.java    | 35 +++-----------------
 .../runtime/TestDockerContainerRuntime.java     | 31 ++++-------------
 .../src/site/markdown/DockerContainers.md       |  1 -
 4 files changed, 28 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d45a0b7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
index dc51b25..da5a8d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
@@ -46,6 +46,8 @@ public class AbstractLauncher {
   private static final Logger log =
     LoggerFactory.getLogger(AbstractLauncher.class);
   public static final String CLASSPATH = "CLASSPATH";
+  public static final String ENV_DOCKER_CONTAINER_MOUNTS =
+      "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
   /**
    * Env vars; set up at final launch stage
    */
@@ -153,17 +155,23 @@ public class AbstractLauncher {
         env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
             "true");
       }
-      StringBuilder sb = new StringBuilder();
-      for (Entry<String,String> mount : mountPaths.entrySet()) {
-        if (sb.length() > 0) {
-          sb.append(",");
+      if (!mountPaths.isEmpty()) {
+        StringBuilder sb = new StringBuilder();
+        if (env.get(ENV_DOCKER_CONTAINER_MOUNTS) != null) {
+          // user specified mounts in the spec
+          sb.append(env.get(ENV_DOCKER_CONTAINER_MOUNTS));
         }
-        sb.append(mount.getKey());
-        sb.append(":");
-        sb.append(mount.getValue());
+        for (Entry<String, String> mount : mountPaths.entrySet()) {
+          if (sb.length() > 0) {
+            sb.append(",");
+          }
+          sb.append(mount.getKey()).append(":");
+          sb.append(mount.getValue()).append(":ro");
+        }
+        env.put(ENV_DOCKER_CONTAINER_MOUNTS, sb.toString());
       }
-      env.put("YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS", sb.toString());
-      log.info("yarn docker env var has been set {}", containerLaunchContext.getEnvironment().toString());
+      log.info("yarn docker env var has been set {}",
+          containerLaunchContext.getEnvironment().toString());
     }
 
     return containerLaunchContext;

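As a reading aid, here is a minimal, self-contained sketch of the merge
behavior the hunk above introduces, assuming a plain Map for the container
environment (the class and method names are illustrative, not the
launcher's API): user-specified YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS
entries are preserved first, and each localized mount path is appended as
a read-only source:dest:ro entry.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class MountMergeSketch {
      static final String ENV_DOCKER_CONTAINER_MOUNTS =
          "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";

      // Append localized mounts as read-only entries after any mounts the
      // user already specified in the spec, mirroring the diff above.
      static void mergeMounts(Map<String, String> env,
          Map<String, String> mountPaths) {
        if (mountPaths.isEmpty()) {
          return;
        }
        StringBuilder sb = new StringBuilder();
        if (env.get(ENV_DOCKER_CONTAINER_MOUNTS) != null) {
          sb.append(env.get(ENV_DOCKER_CONTAINER_MOUNTS));
        }
        for (Map.Entry<String, String> mount : mountPaths.entrySet()) {
          if (sb.length() > 0) {
            sb.append(",");
          }
          sb.append(mount.getKey()).append(":");
          sb.append(mount.getValue()).append(":ro");
        }
        env.put(ENV_DOCKER_CONTAINER_MOUNTS, sb.toString());
      }

      public static void main(String[] args) {
        Map<String, String> env = new LinkedHashMap<>();
        env.put(ENV_DOCKER_CONTAINER_MOUNTS, "/opt/conf:/etc/app:ro");
        Map<String, String> localized = new LinkedHashMap<>();
        localized.put("/local/filecache/10/app.jar", "/app.jar");
        mergeMounts(env, localized);
        // Prints: /opt/conf:/etc/app:ro,/local/filecache/10/app.jar:/app.jar:ro
        System.out.println(env.get(ENV_DOCKER_CONTAINER_MOUNTS));
      }
    }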
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d45a0b7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index a14b085..40cb031 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -153,14 +153,6 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *     setting it to false.
  *   </li>
  *   <li>
- *     {@code YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS} adds
- *     additional volume mounts to the Docker container. The value of the
- *     environment variable should be a comma-separated list of mounts.
- *     All such mounts must be given as {@code source:dest}, where the
- *     source is an absolute path that is not a symlink and that points to a
- *     localized resource.
- *   </li>
- *   <li>
  *     {@code YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS} allows users to specify
 *     additional volume mounts for the Docker container. The value of the
  *     environment variable should be a comma-separated list of mounts.
@@ -227,9 +219,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   public static final String ENV_DOCKER_CONTAINER_RUN_ENABLE_USER_REMAPPING =
       "YARN_CONTAINER_RUNTIME_DOCKER_RUN_ENABLE_USER_REMAPPING";
   @InterfaceAudience.Private
-  public static final String ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS =
-      "YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS";
-  @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_MOUNTS =
       "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
   @InterfaceAudience.Private
@@ -680,8 +669,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     return true;
   }
 
-  @VisibleForTesting
-  protected String validateMount(String mount,
+  private String mountReadOnlyPath(String mount,
       Map<Path, List<String>> localizedResources)
       throws ContainerExecutionException {
     for (Entry<Path, List<String>> resource : localizedResources.entrySet()) {
@@ -817,23 +805,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     runCommand.addAllReadOnlyMountLocations(filecacheDirs);
     runCommand.addAllReadOnlyMountLocations(userFilecacheDirs);
 
-    if (environment.containsKey(ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS)) {
-      String mounts = environment.get(
-          ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS);
-      if (!mounts.isEmpty()) {
-        for (String mount : StringUtils.split(mounts)) {
-          String[] dir = StringUtils.split(mount, ':');
-          if (dir.length != 2) {
-            throw new ContainerExecutionException("Invalid mount : " +
-                mount);
-          }
-          String src = validateMount(dir[0], localizedResources);
-          String dst = dir[1];
-          runCommand.addReadOnlyMountLocation(src, dst, true);
-        }
-      }
-    }
-
     if (environment.containsKey(ENV_DOCKER_CONTAINER_MOUNTS)) {
       Matcher parsedMounts = USER_MOUNT_PATTERN.matcher(
           environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
@@ -845,6 +816,10 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       parsedMounts.reset();
       while (parsedMounts.find()) {
         String src = parsedMounts.group(1);
+        java.nio.file.Path srcPath = java.nio.file.Paths.get(src);
+        if (!srcPath.isAbsolute()) {
+          src = mountReadOnlyPath(src, localizedResources);
+        }
         String dst = parsedMounts.group(2);
         String mode = parsedMounts.group(3);
         if (!mode.equals("ro") && !mode.equals("rw")) {

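The runtime side parses that merged value with USER_MOUNT_PATTERN, whose
definition is not part of this hunk; the sketch below is a stand-in that
behaves the same way on well-formed input, shown only to make the
source:dest:mode contract concrete (the class name and split-based parsing
are illustrative assumptions, not the runtime's implementation):

    public class MountSpecSketch {
      public static void main(String[] args) {
        // Comma-separated list of source:dest:mode entries.
        String mounts = "/opt/conf:/etc/app:ro,/var/data:/data:rw";
        for (String mount : mounts.split(",")) {
          String[] parts = mount.split(":");
          if (parts.length != 3) {
            throw new IllegalArgumentException("Invalid mount: " + mount);
          }
          String src = parts[0];
          String dst = parts[1];
          String mode = parts[2];
          // As in the hunk above: only "ro" and "rw" are valid modes, and
          // a relative source is resolved against localized resources.
          if (!mode.equals("ro") && !mode.equals("rw")) {
            throw new IllegalArgumentException("Invalid mode: " + mode);
          }
          System.out.println(src + " -> " + dst + " (" + mode + ")");
        }
      }
    }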
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d45a0b7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 6ad35b2..af69e22 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
-import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
@@ -1098,7 +1097,7 @@ public class TestDockerContainerRuntime {
     runtime.initialize(conf, nmContext);
 
     env.put(
-        DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS,
+        DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
         "source");
 
     try {
@@ -1118,8 +1117,8 @@ public class TestDockerContainerRuntime {
     runtime.initialize(conf, nmContext);
 
     env.put(
-        DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS,
-        "test_dir/test_resource_file:test_mount");
+        DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
+        "test_dir/test_resource_file:test_mount:ro");
 
     runtime.launchContainer(builder.build());
     PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
@@ -1165,24 +1164,6 @@ public class TestDockerContainerRuntime {
   }
 
   @Test
-  public void testMountInvalid() throws ContainerExecutionException {
-    DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
-        mockExecutor, mockCGroupsHandler);
-    runtime.initialize(conf, nmContext);
-
-    env.put(
-        DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS,
-        "source:target:other");
-
-    try {
-      runtime.launchContainer(builder.build());
-      Assert.fail("Expected a launch container failure due to invalid mount.");
-    } catch (ContainerExecutionException e) {
-      LOG.info("Caught expected exception : " + e);
-    }
-  }
-
-  @Test
   public void testMountMultiple()
       throws ContainerExecutionException, PrivilegedOperationException,
       IOException {
@@ -1191,9 +1172,9 @@ public class TestDockerContainerRuntime {
     runtime.initialize(conf, nmContext);
 
     env.put(
-        DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS,
-        "test_dir/test_resource_file:test_mount1," +
-            "test_dir/test_resource_file:test_mount2");
+        DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_MOUNTS,
+        "test_dir/test_resource_file:test_mount1:ro," +
+            "test_dir/test_resource_file:test_mount2:ro");
 
     runtime.launchContainer(builder.build());
     PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d45a0b7d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 423f1da..3c39291 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -303,7 +303,6 @@ environment variables in the application's environment:
 | `YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK` | Sets the network type to be used by the Docker container. It must be a valid value as determined by the yarn.nodemanager.runtime.linux.docker.allowed-container-networks property. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_PID_NAMESPACE` | Controls which PID namespace will be used by the Docker container. By default, each Docker container has its own PID namespace. To share the namespace of the host, the yarn.nodemanager.runtime.linux.docker.host-pid-namespace.allowed property must be set to true. If the host PID namespace is allowed and this environment variable is set to host, the Docker container will share the host's PID namespace. No other value is allowed. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER` | Controls whether the Docker container is a privileged container. In order to use privileged containers, the yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed property must be set to true, and the application owner must appear in the value of the yarn.nodemanager.runtime.linux.docker.privileged-containers.acl property. If this environment variable is set to true, a privileged Docker container will be used if allowed. No other value is allowed, so the environment variable should be left unset rather than setting it to false. |
-| `YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS` | Adds additional volume mounts to the Docker container. The value of the environment variable should be a comma-separated list of mounts. All such mounts must be given as "source:dest", where the source is an absolute path that is not a symlink and that points to a localized resource. Note that as of YARN-5298, localized directories are automatically mounted into the container as volumes. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS` | Adds additional volume mounts to the Docker container. The value of the environment variable should be a comma-separated list of mounts. All such mounts must be given as "source:dest:mode" and the mode must be "ro" (read-only) or "rw" (read-write) to specify the type of access being requested. The requested mounts will be validated by container-executor based on the values set in container-executor.cfg for docker.allowed.ro-mounts and docker.allowed.rw-mounts. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_DELAYED_REMOVAL` | Allows a user to request delayed deletion of the Docker container on a per container basis. If true, Docker containers will not be removed until the duration defined by yarn.nodemanager.delete.debug-delay-sec has elapsed. Administrators can disable this feature through the yarn-site property yarn.nodemanager.runtime.linux.docker.delayed-removal.allowed. This feature is disabled by default. When this feature is disabled or set to false, the container will be removed as soon as it exits. |
 

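For applications that still set the removed variable, the migration is a
one-line change to the launch environment; a hedged sketch, reusing the
test fixture paths from the TestDockerContainerRuntime hunk above:

    import java.util.HashMap;
    import java.util.Map;

    public class DockerMountsMigrationSketch {
      public static void main(String[] args) {
        Map<String, String> env = new HashMap<>();
        // Before this commit (no longer honored):
        //   env.put("YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS",
        //       "test_dir/test_resource_file:test_mount");
        // After: the surviving variable requires an explicit ro/rw mode.
        env.put("YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS",
            "test_dir/test_resource_file:test_mount:ro");
        System.out.println(env.get("YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS"));
      }
    }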

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] [abbrv] hadoop git commit: YARN-8080. Add restart policy for YARN services. Contributed by Suma Shivaprasad

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java
index 0b56d7e..26e8c93 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/instance/TestComponentInstance.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,56 +18,80 @@
 
 package org.apache.hadoop.yarn.service.component.instance;
 
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.service.ServiceContext;
+import org.apache.hadoop.yarn.service.ServiceScheduler;
 import org.apache.hadoop.yarn.service.ServiceTestUtils;
 import org.apache.hadoop.yarn.service.api.records.Container;
 import org.apache.hadoop.yarn.service.api.records.ContainerState;
+import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.component.Component;
 import org.apache.hadoop.yarn.service.component.ComponentEvent;
 import org.apache.hadoop.yarn.service.component.ComponentEventType;
 import org.apache.hadoop.yarn.service.component.TestComponent;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
  * Tests for {@link ComponentInstance}.
  */
 public class TestComponentInstance {
 
-  @Rule
-  public ServiceTestUtils.ServiceFSWatcher rule =
+  @Rule public ServiceTestUtils.ServiceFSWatcher rule =
       new ServiceTestUtils.ServiceFSWatcher();
 
-  @Test
-  public void testContainerUpgrade() throws Exception {
+  @Test public void testContainerUpgrade() throws Exception {
     ServiceContext context = TestComponent.createTestContext(rule,
         "testContainerUpgrade");
-    Component component = context.scheduler.getAllComponents().entrySet()
-        .iterator().next().getValue();
+    Component component =
+        context.scheduler.getAllComponents().entrySet().iterator().next()
+            .getValue();
     upgradeComponent(component);
 
-    ComponentInstance instance = component.getAllComponentInstances()
-        .iterator().next();
+    ComponentInstance instance =
+        component.getAllComponentInstances().iterator().next();
     ComponentInstanceEvent instanceEvent = new ComponentInstanceEvent(
         instance.getContainer().getId(), ComponentInstanceEventType.UPGRADE);
     instance.handle(instanceEvent);
     Container containerSpec = component.getComponentSpec().getContainer(
         instance.getContainer().getId().toString());
-    Assert.assertEquals("instance not upgrading",
-        ContainerState.UPGRADING, containerSpec.getState());
+    Assert.assertEquals("instance not upgrading", ContainerState.UPGRADING,
+        containerSpec.getState());
   }
 
-  @Test
-  public void testContainerReadyAfterUpgrade() throws Exception {
+  @Test public void testContainerReadyAfterUpgrade() throws Exception {
     ServiceContext context = TestComponent.createTestContext(rule,
         "testContainerStarted");
-    Component component = context.scheduler.getAllComponents().entrySet()
-        .iterator().next().getValue();
+    Component component =
+        context.scheduler.getAllComponents().entrySet().iterator().next()
+            .getValue();
     upgradeComponent(component);
 
-    ComponentInstance instance = component.getAllComponentInstances()
-        .iterator().next();
+    ComponentInstance instance =
+        component.getAllComponentInstances().iterator().next();
 
     ComponentInstanceEvent instanceEvent = new ComponentInstanceEvent(
         instance.getContainer().getId(), ComponentInstanceEventType.UPGRADE);
@@ -75,14 +99,426 @@ public class TestComponentInstance {
 
     instance.handle(new ComponentInstanceEvent(instance.getContainer().getId(),
         ComponentInstanceEventType.BECOME_READY));
-    Assert.assertEquals("instance not ready",
-        ContainerState.READY, instance.getCompSpec().getContainer(
-            instance.getContainer().getId().toString()).getState());
+    Assert.assertEquals("instance not ready", ContainerState.READY,
+        instance.getCompSpec()
+            .getContainer(instance.getContainer().getId().toString())
+            .getState());
   }
 
   private void upgradeComponent(Component component) {
     component.handle(new ComponentEvent(component.getName(),
-        ComponentEventType.UPGRADE)
-        .setTargetSpec(component.getComponentSpec()).setUpgradeVersion("v2"));
+        ComponentEventType.UPGRADE).setTargetSpec(component.getComponentSpec())
+        .setUpgradeVersion("v2"));
+  }
+
+  private Component createComponent(ServiceScheduler scheduler,
+      org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum
+          restartPolicy,
+      int nSucceededInstances, int nFailedInstances, int totalAsk,
+      int componentId) {
+
+    assert (nSucceededInstances + nFailedInstances) <= totalAsk;
+
+    Component comp = mock(Component.class);
+    org.apache.hadoop.yarn.service.api.records.Component componentSpec = mock(
+        org.apache.hadoop.yarn.service.api.records.Component.class);
+    when(componentSpec.getRestartPolicy()).thenReturn(restartPolicy);
+    when(comp.getRestartPolicyHandler()).thenReturn(
+        Component.getRestartPolicyHandler(restartPolicy));
+    when(componentSpec.getNumberOfContainers()).thenReturn(
+        Long.valueOf(totalAsk));
+    when(comp.getComponentSpec()).thenReturn(componentSpec);
+    when(comp.getScheduler()).thenReturn(scheduler);
+
+    Map<String, ComponentInstance> succeeded = new ConcurrentHashMap<>();
+    Map<String, ComponentInstance> failed = new ConcurrentHashMap<>();
+    scheduler.getAllComponents().put("comp" + componentId, comp);
+
+    Map<String, ComponentInstance> componentInstances = new HashMap<>();
+
+    for (int i = 0; i < nSucceededInstances; i++) {
+      ComponentInstance componentInstance = createComponentInstance(comp, i);
+      componentInstances.put(componentInstance.getCompInstanceName(),
+          componentInstance);
+      succeeded.put(componentInstance.getCompInstanceName(), componentInstance);
+    }
+
+    for (int i = 0; i < nFailedInstances; i++) {
+      ComponentInstance componentInstance = createComponentInstance(comp,
+          i + nSucceededInstances);
+      componentInstances.put(componentInstance.getCompInstanceName(),
+          componentInstance);
+      failed.put(componentInstance.getCompInstanceName(), componentInstance);
+    }
+
+    int delta = totalAsk - nFailedInstances - nSucceededInstances;
+
+    for (int i = 0; i < delta; i++) {
+      ComponentInstance componentInstance = createComponentInstance(comp,
+          i + nSucceededInstances + nFailedInstances);
+      componentInstances.put(componentInstance.getCompInstanceName(),
+          componentInstance);
+    }
+
+    when(comp.getAllComponentInstances()).thenReturn(
+        componentInstances.values());
+    when(comp.getSucceededInstances()).thenReturn(succeeded.values());
+    when(comp.getFailedInstances()).thenReturn(failed.values());
+    return comp;
+  }
+
+  private Component createComponent(ServiceScheduler scheduler,
+      org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum
+          restartPolicy,
+      int totalAsk, int componentId) {
+
+    Component comp = mock(Component.class);
+    org.apache.hadoop.yarn.service.api.records.Component componentSpec = mock(
+        org.apache.hadoop.yarn.service.api.records.Component.class);
+    when(componentSpec.getRestartPolicy()).thenReturn(restartPolicy);
+    when(comp.getRestartPolicyHandler()).thenReturn(
+        Component.getRestartPolicyHandler(restartPolicy));
+    when(componentSpec.getNumberOfContainers()).thenReturn(
+        Long.valueOf(totalAsk));
+    when(comp.getComponentSpec()).thenReturn(componentSpec);
+    when(comp.getScheduler()).thenReturn(scheduler);
+
+    scheduler.getAllComponents().put("comp" + componentId, comp);
+
+    Map<String, ComponentInstance> componentInstances = new HashMap<>();
+
+    for (int i = 0; i < totalAsk; i++) {
+      ComponentInstance componentInstance = createComponentInstance(comp, i);
+      componentInstances.put(componentInstance.getCompInstanceName(),
+          componentInstance);
+    }
+
+    when(comp.getAllComponentInstances()).thenReturn(
+        componentInstances.values());
+    return comp;
+  }
+
+  private ComponentInstance createComponentInstance(Component component,
+      int instanceId) {
+
+    ComponentInstance componentInstance = mock(ComponentInstance.class);
+    when(componentInstance.getComponent()).thenReturn(component);
+    when(componentInstance.getCompInstanceName()).thenReturn(
+        "compInstance" + instanceId);
+
+    ServiceUtils.ProcessTerminationHandler terminationHandler = mock(
+        ServiceUtils.ProcessTerminationHandler.class);
+    when(component.getScheduler().getTerminationHandler()).thenReturn(
+        terminationHandler);
+
+    return componentInstance;
+  }
+
+  @Test public void testComponentRestartPolicy() {
+
+    Map<String, Component> allComponents = new HashMap<>();
+    Service mockService = mock(Service.class);
+    ServiceContext serviceContext = mock(ServiceContext.class);
+    when(serviceContext.getService()).thenReturn(mockService);
+    ServiceScheduler serviceSchedulerInstance = new ServiceScheduler(
+        serviceContext);
+    ServiceScheduler serviceScheduler = spy(serviceSchedulerInstance);
+    when(serviceScheduler.getAllComponents()).thenReturn(allComponents);
+    Mockito.doNothing().when(serviceScheduler).setGracefulStop(
+        any(FinalApplicationStatus.class));
+
+    ComponentInstanceEvent componentInstanceEvent = mock(
+        ComponentInstanceEvent.class);
+    ContainerId containerId = ContainerId.newContainerId(ApplicationAttemptId
+        .newInstance(ApplicationId.newInstance(1234L, 1), 1), 1);
+    ContainerStatus containerStatus = ContainerStatus.newInstance(containerId,
+        org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE, "hello", 0);
+
+    when(componentInstanceEvent.getStatus()).thenReturn(containerStatus);
+
+    // Test case1: one component, one instance, restart policy = ALWAYS, exit=0
+    Component comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.ALWAYS,
+        1, 0, 1, 0);
+    ComponentInstance componentInstance =
+        comp.getAllComponentInstances().iterator().next();
+
+    ComponentInstance.handleComponentInstanceRelaunch(componentInstance,
+        componentInstanceEvent);
+
+    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(1)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), never()).terminate(
+        anyInt());
+
+    // Test case2: one component, one instance, restart policy = ALWAYS, exit=1
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.ALWAYS,
+        0, 1, 1, 0);
+    componentInstance = comp.getAllComponentInstances().iterator().next();
+    containerStatus.setExitStatus(1);
+    ComponentInstance.handleComponentInstanceRelaunch(componentInstance,
+        componentInstanceEvent);
+    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(1)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), never()).terminate(
+        anyInt());
+
+    // Test case3: one component, one instance, restart policy = NEVER, exit=0
+    // Should exit with code=0
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.NEVER,
+        1, 0, 1, 0);
+    componentInstance = comp.getAllComponentInstances().iterator().next();
+    containerStatus.setExitStatus(0);
+
+    Map<String, ComponentInstance> succeededInstances = new HashMap<>();
+    succeededInstances.put(componentInstance.getCompInstanceName(),
+        componentInstance);
+    when(comp.getSucceededInstances()).thenReturn(succeededInstances.values());
+    when(comp.getNumSucceededInstances()).thenReturn(new Long(1));
+
+    ComponentInstance.handleComponentInstanceRelaunch(componentInstance,
+        componentInstanceEvent);
+    verify(comp, times(1)).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(0)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(eq(0));
+
+    // Test case4: one component, one instance, restart policy = NEVER, exit=1
+    // Should exit with code=-1
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.NEVER,
+        0, 1, 1, 0);
+    componentInstance = comp.getAllComponentInstances().iterator().next();
+    containerStatus.setExitStatus(-1);
+
+    when(comp.getNumFailedInstances()).thenReturn(new Long(1));
+    ComponentInstance.handleComponentInstanceRelaunch(componentInstance,
+        componentInstanceEvent);
+
+    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, times(1)).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(0)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(
+        eq(-1));
+
+    // Test case5: one component, one instance, restart policy = ON_FAILURE,
+    // exit=1.
+    // The service should keep running.
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.ON_FAILURE,
+        0, 1, 1, 0);
+    componentInstance = comp.getAllComponentInstances().iterator().next();
+    containerStatus.setExitStatus(1);
+    ComponentInstance.handleComponentInstanceRelaunch(componentInstance,
+        componentInstanceEvent);
+    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(1)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), times(0)).terminate(
+        anyInt());
+
+    // Test case6: one component, 3 instances, restart policy = NEVER, exit=1.
+    // 2 of the instances have not completed, so the service should keep
+    // running.
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.NEVER,
+        0, 1, 3, 0);
+    componentInstance = comp.getAllComponentInstances().iterator().next();
+    containerStatus.setExitStatus(1);
+    ComponentInstance.handleComponentInstanceRelaunch(componentInstance,
+        componentInstanceEvent);
+    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, times(1)).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(0)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), times(0)).terminate(
+        anyInt());
+
+    // Test case7: one component, 3 instances, restart policy = ON_FAILURE,
+    // exit=1.
+    // 2 of the instances have completed, but the service should keep running.
+
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.ON_FAILURE,
+        0, 1, 3, 0);
+
+    Iterator<ComponentInstance> iter =
+        comp.getAllComponentInstances().iterator();
+
+    containerStatus.setExitStatus(1);
+    ComponentInstance firstInstance = iter.next();
+    ComponentInstance.handleComponentInstanceRelaunch(firstInstance,
+        componentInstanceEvent);
+
+    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(1)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), times(0)).terminate(
+        anyInt());
+
+    // Test case8: 2 components, 2 instances each.
+    // comp2 has already finished.
+    // Once comp1's instances finish as well, the service should terminate.
+
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.NEVER,
+        2, 0);
+    Collection<ComponentInstance> component1Instances =
+        comp.getAllComponentInstances();
+
+    containerStatus.setExitStatus(-1);
+
+    Component comp2 = createComponent(
+        componentInstance.getComponent().getScheduler(),
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.NEVER,
+        2, 1);
+
+    Collection<ComponentInstance> component2Instances =
+        comp2.getAllComponentInstances();
+
+    Map<String, ComponentInstance> failed2Instances = new HashMap<>();
+
+    for (ComponentInstance component2Instance : component2Instances) {
+      failed2Instances.put(component2Instance.getCompInstanceName(),
+          component2Instance);
+      when(component2Instance.getComponent().getFailedInstances()).thenReturn(
+          failed2Instances.values());
+      when(component2Instance.getComponent().getNumFailedInstances())
+          .thenReturn(new Long(failed2Instances.size()));
+      ComponentInstance.handleComponentInstanceRelaunch(component2Instance,
+          componentInstanceEvent);
+    }
+
+    Map<String, ComponentInstance> failed1Instances = new HashMap<>();
+
+    // comp2 has already finished; now the comp1 instances complete as well.
+    for (ComponentInstance component1Instance : component1Instances) {
+      failed1Instances.put(component1Instance.getCompInstanceName(),
+          component1Instance);
+      when(component1Instance.getComponent().getFailedInstances()).thenReturn(
+          failed1Instances.values());
+      when(component1Instance.getComponent().getNumFailedInstances())
+          .thenReturn(new Long(failed1Instances.size()));
+      ComponentInstance.handleComponentInstanceRelaunch(component1Instance,
+          componentInstanceEvent);
+    }
+
+    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, times(2)).markAsFailed(any(ComponentInstance.class));
+    verify(comp, times(0)).reInsertPendingInstance(
+        any(ComponentInstance.class));
+
+    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(
+        eq(-1));
+
+    // Test case9: 2 components, 2 instances each.
+    // comp2 has already finished.
+    // Once comp1's instances finish as well, the service should terminate.
+    // All instances exit with 0, so the service should exit with 0 as well.
+    containerStatus.setExitStatus(0);
+
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.ON_FAILURE,
+        2, 0);
+    component1Instances = comp.getAllComponentInstances();
+
+    comp2 = createComponent(componentInstance.getComponent().getScheduler(),
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.ON_FAILURE,
+        2, 1);
+
+    component2Instances = comp2.getAllComponentInstances();
+
+    Map<String, ComponentInstance> succeeded2Instances = new HashMap<>();
+
+    for (ComponentInstance component2Instance : component2Instances) {
+      succeeded2Instances.put(component2Instance.getCompInstanceName(),
+          component2Instance);
+      when(component2Instance.getComponent().getSucceededInstances())
+          .thenReturn(succeeded2Instances.values());
+      when(component2Instance.getComponent().getNumSucceededInstances())
+          .thenReturn(new Long(succeeded2Instances.size()));
+      ComponentInstance.handleComponentInstanceRelaunch(component2Instance,
+          componentInstanceEvent);
+    }
+
+    Map<String, ComponentInstance> succeeded1Instances = new HashMap<>();
+    // comp2 has already finished; now the comp1 instances complete as well.
+    for (ComponentInstance component1Instance : component1Instances) {
+      succeeded1Instances.put(component1Instance.getCompInstanceName(),
+          component1Instance);
+      when(component1Instance.getComponent().getSucceededInstances())
+          .thenReturn(succeeded1Instances.values());
+      when(component1Instance.getComponent().getNumSucceededInstances())
+          .thenReturn(new Long(succeeded1Instances.size()));
+      ComponentInstance.handleComponentInstanceRelaunch(component1Instance,
+          componentInstanceEvent);
+    }
+
+    verify(comp, times(2)).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
+    verify(componentInstance.getComponent(), never()).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(eq(0));
+
+    // Test case10: 2 components, 2 instances each.
+    // comp2 has not finished yet, while comp1 has finished; the service
+    // should continue to run.
+
+    comp = createComponent(serviceScheduler,
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.NEVER,
+        2, 0);
+    component1Instances = comp.getAllComponentInstances();
+
+    comp2 = createComponent(componentInstance.getComponent().getScheduler(),
+        org.apache.hadoop.yarn.service.api.records.Component
+            .RestartPolicyEnum.NEVER,
+        2, 1);
+
+    component2Instances = comp2.getAllComponentInstances();
+
+    for (ComponentInstance component2Instance : component2Instances) {
+      ComponentInstance.handleComponentInstanceRelaunch(component2Instance,
+          componentInstanceEvent);
+    }
+
+    succeeded1Instances = new HashMap<>();
+    // comp1 instances finish; comp2 is still considered running.
+    for (ComponentInstance component1Instance : component1Instances) {
+      succeeded1Instances.put(component1Instance.getCompInstanceName(),
+          component1Instance);
+      when(component1Instance.getComponent().getSucceededInstances())
+          .thenReturn(succeeded1Instances.values());
+      ComponentInstance.handleComponentInstanceRelaunch(component1Instance,
+          componentInstanceEvent);
+    }
+
+    verify(comp, times(2)).markAsSucceeded(any(ComponentInstance.class));
+    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
+    verify(componentInstance.getComponent(), never()).reInsertPendingInstance(
+        any(ComponentInstance.class));
+    verify(serviceScheduler.getTerminationHandler(), never()).terminate(eq(0));
+
   }
 }
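
A minimal sketch of the relaunch decision the assertions above exercise,
written with a hypothetical helper name; the production logic lives in
ComponentInstance.handleComponentInstanceRelaunch, and the policy semantics
follow the restart_policy documentation added below:

    // Hypothetical sketch only; the real code differs in detail.
    static void onInstanceCompleted(Component comp, ComponentInstance instance,
        int exitCode, RestartPolicyEnum policy) {
      boolean relaunch = policy == RestartPolicyEnum.ALWAYS
          || (policy == RestartPolicyEnum.ON_FAILURE && exitCode != 0);
      if (relaunch) {
        comp.reInsertPendingInstance(instance);  // instance is retried
      } else if (exitCode == 0) {
        comp.markAsSucceeded(instance);          // terminal success
      } else {
        comp.markAsFailed(instance);             // terminal failure (NEVER)
      }
      // Once every instance of every component is terminal, the service
      // terminates: exit 0 if all instances succeeded, -1 if any failed.
      // While some component is still running, terminate() is not called.
    }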

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f083ed8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index c648046..2b567b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -233,6 +233,8 @@ One or more components of the service. If the service is HBase say, then the com
 |placement_policy|Advanced scheduling and placement policies for all containers of this component.|false|PlacementPolicy||
 |configuration|Config properties for this component.|false|Configuration||
 |quicklinks|A list of quicklink keys defined at the service level, and to be resolved by this component.|false|string array||
+|restart_policy|Restart policy for the component. ALWAYS: restart the component even if an instance exits with code 0. ON_FAILURE: restart the component only if an instance exits with a non-zero code. NEVER: do not restart in any case. Flexing is not supported for components with restart_policy=ON_FAILURE or NEVER.|false|string|ALWAYS|
 
 
 ### ComponentState
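
For illustration, a component that sets the new field might be declared as
follows; the skeleton and values are hypothetical, in the style of the JSON
examples elsewhere in YarnServiceAPI.md:

    {
      "name": "sleeper-service",
      "version": "1.0",
      "components": [
        {
          "name": "sleeper",
          "number_of_containers": 2,
          "launch_command": "sleep 900",
          "restart_policy": "ON_FAILURE",
          "resource": {
            "cpus": 1,
            "memory": "256"
          }
        }
      ]
    }

Per the table entry above, such a component retries only instances that exit
with a non-zero code, and it cannot be flexed at runtime.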

